/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_page {
	struct scrub_block *sblock;
	struct page *page;
	struct btrfs_device *dev;
	u64 flags;		/* extent flags */
	u64 generation;
	u64 logical;
	u64 physical;
	u64 physical_for_dev_replace;
	atomic_t ref_count;
	struct {
		unsigned int mirror_num:8;
		unsigned int have_csum:1;
		unsigned int io_error:1;
	};
	u8 csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int index;
	struct scrub_ctx *sctx;
	struct btrfs_device *dev;
	struct bio *bio;
	int err;
	u64 logical;
	u64 physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int page_count;
	int next_free;
	struct btrfs_work work;
};

struct scrub_block {
	struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int page_count;
	atomic_t outstanding_pages;
	atomic_t ref_count;	/* free mem on transition to zero */
	struct scrub_ctx *sctx;
	struct {
		unsigned int header_error:1;
		unsigned int checksum_error:1;
		unsigned int no_io_error_seen:1;
		unsigned int generation_error:1; /* also sets header_error */
	};
};

struct scrub_wr_ctx {
	struct scrub_bio *wr_curr_bio;
	struct btrfs_device *tgtdev;
	int pages_per_wr_bio;	/* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t flush_all_writes;
	struct mutex wr_lock;
};

struct scrub_ctx {
	struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root *dev_root;
	int first_free;
	int curr;
	atomic_t bios_in_flight;
	atomic_t workers_pending;
	spinlock_t list_lock;
	wait_queue_head_t list_wait;
	u16 csum_size;
	struct list_head csum_list;
	atomic_t cancel_req;
	int readonly;
	int pages_per_rd_bio;
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	int is_dev_replace;
	struct scrub_wr_ctx wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx *sctx;
	struct btrfs_device *dev;
	u64 logical;
	struct btrfs_root *root;
	struct btrfs_work work;
	int mirror_num;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx *sctx;
	u64 logical;
	u64 len;
	int mirror_num;
	u64 physical_for_dev_replace;
	struct btrfs_work work;
};

struct scrub_warning {
	struct btrfs_path *path;
	u64 extent_item_size;
	char *scratch_buf;
	char *msg_buf;
	const char *errstr;
	sector_t sector;
	u64 logical;
	struct btrfs_device *dev;
	int msg_bufsize;
	int scratch_bufsize;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
		struct btrfs_fs_info *fs_info,
		struct scrub_block *original_sblock,
		u64 length, u64 logical,
		struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
		struct scrub_block *sblock, int is_metadata,
		int have_csum, u8 *csum, u64 generation,
		u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
		struct scrub_block *sblock,
		int is_metadata, int have_csum,
		const u8 *csum, u64 generation,
		u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
		struct scrub_block *sblock_good,
		int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
		struct scrub_block *sblock_good,
		int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
		int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
		struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		u64 physical, struct btrfs_device *dev, u64 flags,
		u64 gen, int mirror_num, u8 *csum, int force,
		u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
		u64 extent_logical, u64 extent_len,
		u64 *extent_physical,
		struct btrfs_device **extent_dev,
		int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
		struct scrub_wr_ctx *wr_ctx,
		struct btrfs_fs_info *fs_info,
		struct btrfs_device *dev,
		int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
		struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
		u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
		void *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
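
/*
 * Simple in-flight accounting: scrub_pending_bio_inc()/dec() track the number
 * of scrub bios currently outstanding for this context; the dec side wakes up
 * waiters on sctx->list_wait so that code waiting for all bios to complete
 * can make progress.
 */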
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}
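
/*
 * Drop every btrfs_ordered_sum still queued on sctx->csum_list and free it;
 * called when the scrub context is torn down.
 */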
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
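
/*
 * Tear down a scrub context: release the write context, drop the blocks of a
 * bio that was still being filled when scrub was cancelled, free all
 * pre-allocated scrub_bios, the queued checksums and finally the context
 * itself.
 */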
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}
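
/*
 * Allocate and initialize a scrub context for one device: pre-allocate the
 * SCRUB_BIOS_PER_SCTX scrub_bios as a free list, copy the node/leaf/sector
 * and checksum sizes from the fs, and set up the dev-replace write context.
 * Returns an ERR_PTR on allocation or setup failure.
 */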
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
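
/*
 * Backref-walk callback: for one (inode, offset, root) triple that references
 * the damaged extent, resolve the file path(s) via an inode_fs_paths and
 * print a warning naming the affected file, offset and device.
 */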
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
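
/*
 * Print a warning for an errored block. For metadata, walk the tree backrefs
 * and report the owning tree and level; for data, iterate the extent's
 * inodes via scrub_print_warning_inode() so the affected files get named.
 */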
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
						      &ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}
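
/*
 * iterate_inodes_from_logical() callback for the nodatasum fixup path: look
 * up the inode, load the affected page and either rewrite the defect sector
 * directly via repair_io_failure() (page already uptodate and clean) or
 * force a read of the bad mirror so the generic readpage repair logic can
 * fix it.
 */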
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 * runner to write out that page (might involve
			 * COW) and see whether the sector is still
			 * referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}
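
/*
 * Worker for fixing up data blocks without checksums (the NOCOW case). Runs
 * in a transaction context and walks every inode referencing the bad logical
 * address with scrub_fixup_readpage(); updates the corrected/uncorrectable
 * statistics accordingly.
 */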
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical,
			rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly && !sctx->is_dev_replace)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
		WARN_ON(sctx->is_dev_replace);

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&sctx->dev_root->
							fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from. scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}

	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to check whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				(unsigned long long)logical,
				rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			(unsigned long long)logical,
			rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}
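
/*
 * Prepare one scrub_block per mirror for the recheck: map each PAGE_SIZE
 * piece of the errored range with btrfs_map_block(), allocate a scrub_page
 * and a backing page for every (page, mirror) combination, and record the
 * physical location on each mirror's device.
 */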
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
				      &mapped_length, &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;
			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}
  1141. /*
  1142. * this function will check the on disk data for checksum errors, header
  1143. * errors and read I/O errors. If any I/O errors happen, the exact pages
  1144. * which are errored are marked as being bad. The goal is to enable scrub
  1145. * to take those pages that are not errored from all the mirrors so that
  1146. * the pages that are errored in the just handled mirror can be repaired.
  1147. */
  1148. static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
  1149. struct scrub_block *sblock, int is_metadata,
  1150. int have_csum, u8 *csum, u64 generation,
  1151. u16 csum_size)
  1152. {
  1153. int page_num;
  1154. sblock->no_io_error_seen = 1;
  1155. sblock->header_error = 0;
  1156. sblock->checksum_error = 0;
  1157. for (page_num = 0; page_num < sblock->page_count; page_num++) {
  1158. struct bio *bio;
  1159. struct scrub_page *page = sblock->pagev[page_num];
  1160. DECLARE_COMPLETION_ONSTACK(complete);
  1161. if (page->dev->bdev == NULL) {
  1162. page->io_error = 1;
  1163. sblock->no_io_error_seen = 0;
  1164. continue;
  1165. }
  1166. WARN_ON(!page->page);
  1167. bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
  1168. if (!bio) {
  1169. page->io_error = 1;
  1170. sblock->no_io_error_seen = 0;
  1171. continue;
  1172. }
  1173. bio->bi_bdev = page->dev->bdev;
  1174. bio->bi_sector = page->physical >> 9;
  1175. bio->bi_end_io = scrub_complete_bio_end_io;
  1176. bio->bi_private = &complete;
  1177. bio_add_page(bio, page->page, PAGE_SIZE, 0);
  1178. btrfsic_submit_bio(READ, bio);
  1179. /* this will also unplug the queue */
  1180. wait_for_completion(&complete);
  1181. page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
  1182. if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
  1183. sblock->no_io_error_seen = 0;
  1184. bio_put(bio);
  1185. }
  1186. if (sblock->no_io_error_seen)
  1187. scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
  1188. have_csum, csum, generation,
  1189. csum_size);
  1190. return;
  1191. }
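/*
 * Recompute the checksum over all pages of the block that was just reread.
 * For metadata, the csum is taken from the block's own header and the
 * header fields (bytenr, fsid, chunk tree uuid, generation) are validated
 * as well; for data, the csum passed in by the caller is used. The result
 * is recorded in sblock->header_error / generation_error / checksum_error.
 */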
  1192. static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
  1193. struct scrub_block *sblock,
  1194. int is_metadata, int have_csum,
  1195. const u8 *csum, u64 generation,
  1196. u16 csum_size)
  1197. {
  1198. int page_num;
  1199. u8 calculated_csum[BTRFS_CSUM_SIZE];
  1200. u32 crc = ~(u32)0;
  1201. void *mapped_buffer;
  1202. WARN_ON(!sblock->pagev[0]->page);
  1203. if (is_metadata) {
  1204. struct btrfs_header *h;
  1205. mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
  1206. h = (struct btrfs_header *)mapped_buffer;
  1207. if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
  1208. memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
  1209. memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
  1210. BTRFS_UUID_SIZE)) {
  1211. sblock->header_error = 1;
  1212. } else if (generation != btrfs_stack_header_generation(h)) {
  1213. sblock->header_error = 1;
  1214. sblock->generation_error = 1;
  1215. }
  1216. csum = h->csum;
  1217. } else {
  1218. if (!have_csum)
  1219. return;
  1220. mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
  1221. }
  1222. for (page_num = 0;;) {
  1223. if (page_num == 0 && is_metadata)
  1224. crc = btrfs_csum_data(
  1225. ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
  1226. crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
  1227. else
  1228. crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
  1229. kunmap_atomic(mapped_buffer);
  1230. page_num++;
  1231. if (page_num >= sblock->page_count)
  1232. break;
  1233. WARN_ON(!sblock->pagev[page_num]->page);
  1234. mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
  1235. }
  1236. btrfs_csum_final(crc, calculated_csum);
  1237. if (memcmp(calculated_csum, csum, csum_size))
  1238. sblock->checksum_error = 1;
  1239. }
  1240. static void scrub_complete_bio_end_io(struct bio *bio, int err)
  1241. {
  1242. complete((struct completion *)bio->bi_private);
  1243. }
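/*
 * Copy pages of the bad block from the good mirror. A page is only
 * rewritten when forced or when the bad block shows a header, checksum
 * or per-page I/O error; if any page fails to be written, an error is
 * returned.
 */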
  1244. static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
  1245. struct scrub_block *sblock_good,
  1246. int force_write)
  1247. {
  1248. int page_num;
  1249. int ret = 0;
  1250. for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
  1251. int ret_sub;
  1252. ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
  1253. sblock_good,
  1254. page_num,
  1255. force_write);
  1256. if (ret_sub)
  1257. ret = ret_sub;
  1258. }
  1259. return ret;
  1260. }
  1261. static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
  1262. struct scrub_block *sblock_good,
  1263. int page_num, int force_write)
  1264. {
  1265. struct scrub_page *page_bad = sblock_bad->pagev[page_num];
  1266. struct scrub_page *page_good = sblock_good->pagev[page_num];
  1267. BUG_ON(page_bad->page == NULL);
  1268. BUG_ON(page_good->page == NULL);
  1269. if (force_write || sblock_bad->header_error ||
  1270. sblock_bad->checksum_error || page_bad->io_error) {
  1271. struct bio *bio;
  1272. int ret;
  1273. DECLARE_COMPLETION_ONSTACK(complete);
  1274. if (!page_bad->dev->bdev) {
  1275. printk_ratelimited(KERN_WARNING
  1276. "btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
  1277. return -EIO;
  1278. }
  1279. bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
  1280. if (!bio)
  1281. return -EIO;
  1282. bio->bi_bdev = page_bad->dev->bdev;
  1283. bio->bi_sector = page_bad->physical >> 9;
  1284. bio->bi_end_io = scrub_complete_bio_end_io;
  1285. bio->bi_private = &complete;
  1286. ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
  1287. if (PAGE_SIZE != ret) {
  1288. bio_put(bio);
  1289. return -EIO;
  1290. }
  1291. btrfsic_submit_bio(WRITE, bio);
  1292. /* this will also unplug the queue */
  1293. wait_for_completion(&complete);
  1294. if (!bio_flagged(bio, BIO_UPTODATE)) {
  1295. btrfs_dev_stat_inc_and_print(page_bad->dev,
  1296. BTRFS_DEV_STAT_WRITE_ERRS);
  1297. btrfs_dev_replace_stats_inc(
  1298. &sblock_bad->sctx->dev_root->fs_info->
  1299. dev_replace.num_write_errors);
  1300. bio_put(bio);
  1301. return -EIO;
  1302. }
  1303. bio_put(bio);
  1304. }
  1305. return 0;
  1306. }
  1307. static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
  1308. {
  1309. int page_num;
  1310. for (page_num = 0; page_num < sblock->page_count; page_num++) {
  1311. int ret;
  1312. ret = scrub_write_page_to_dev_replace(sblock, page_num);
  1313. if (ret)
  1314. btrfs_dev_replace_stats_inc(
  1315. &sblock->sctx->dev_root->fs_info->dev_replace.
  1316. num_write_errors);
  1317. }
  1318. }
  1319. static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
  1320. int page_num)
  1321. {
  1322. struct scrub_page *spage = sblock->pagev[page_num];
  1323. BUG_ON(spage->page == NULL);
  1324. if (spage->io_error) {
  1325. void *mapped_buffer = kmap_atomic(spage->page);
  1326. memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
  1327. flush_dcache_page(spage->page);
  1328. kunmap_atomic(mapped_buffer);
  1329. }
  1330. return scrub_add_page_to_wr_bio(sblock->sctx, spage);
  1331. }
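/*
 * Queue a page for writing to the dev-replace target. Pages are collected
 * into the current write bio as long as they are physically and logically
 * contiguous; otherwise (or when the bio is full) the bio is submitted and
 * a new one is started.
 */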
  1332. static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
  1333. struct scrub_page *spage)
  1334. {
  1335. struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
  1336. struct scrub_bio *sbio;
  1337. int ret;
  1338. mutex_lock(&wr_ctx->wr_lock);
  1339. again:
  1340. if (!wr_ctx->wr_curr_bio) {
  1341. wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
  1342. GFP_NOFS);
  1343. if (!wr_ctx->wr_curr_bio) {
  1344. mutex_unlock(&wr_ctx->wr_lock);
  1345. return -ENOMEM;
  1346. }
  1347. wr_ctx->wr_curr_bio->sctx = sctx;
  1348. wr_ctx->wr_curr_bio->page_count = 0;
  1349. }
  1350. sbio = wr_ctx->wr_curr_bio;
  1351. if (sbio->page_count == 0) {
  1352. struct bio *bio;
  1353. sbio->physical = spage->physical_for_dev_replace;
  1354. sbio->logical = spage->logical;
  1355. sbio->dev = wr_ctx->tgtdev;
  1356. bio = sbio->bio;
  1357. if (!bio) {
  1358. bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
  1359. if (!bio) {
  1360. mutex_unlock(&wr_ctx->wr_lock);
  1361. return -ENOMEM;
  1362. }
  1363. sbio->bio = bio;
  1364. }
  1365. bio->bi_private = sbio;
  1366. bio->bi_end_io = scrub_wr_bio_end_io;
  1367. bio->bi_bdev = sbio->dev->bdev;
  1368. bio->bi_sector = sbio->physical >> 9;
  1369. sbio->err = 0;
  1370. } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
  1371. spage->physical_for_dev_replace ||
  1372. sbio->logical + sbio->page_count * PAGE_SIZE !=
  1373. spage->logical) {
  1374. scrub_wr_submit(sctx);
  1375. goto again;
  1376. }
  1377. ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
  1378. if (ret != PAGE_SIZE) {
  1379. if (sbio->page_count < 1) {
  1380. bio_put(sbio->bio);
  1381. sbio->bio = NULL;
  1382. mutex_unlock(&wr_ctx->wr_lock);
  1383. return -EIO;
  1384. }
  1385. scrub_wr_submit(sctx);
  1386. goto again;
  1387. }
  1388. sbio->pagev[sbio->page_count] = spage;
  1389. scrub_page_get(spage);
  1390. sbio->page_count++;
  1391. if (sbio->page_count == wr_ctx->pages_per_wr_bio)
  1392. scrub_wr_submit(sctx);
  1393. mutex_unlock(&wr_ctx->wr_lock);
  1394. return 0;
  1395. }
  1396. static void scrub_wr_submit(struct scrub_ctx *sctx)
  1397. {
  1398. struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
  1399. struct scrub_bio *sbio;
  1400. if (!wr_ctx->wr_curr_bio)
  1401. return;
  1402. sbio = wr_ctx->wr_curr_bio;
  1403. wr_ctx->wr_curr_bio = NULL;
  1404. WARN_ON(!sbio->bio->bi_bdev);
  1405. scrub_pending_bio_inc(sctx);
1406. /* process all writes in a single worker thread, so that the block
1407. * layer can order the requests before sending them to the driver;
1408. * this doubled the write performance on spinning disks when measured
1409. * with Linux 3.5 */
  1410. btrfsic_submit_bio(WRITE, sbio->bio);
  1411. }
  1412. static void scrub_wr_bio_end_io(struct bio *bio, int err)
  1413. {
  1414. struct scrub_bio *sbio = bio->bi_private;
  1415. struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
  1416. sbio->err = err;
  1417. sbio->bio = bio;
  1418. sbio->work.func = scrub_wr_bio_end_io_worker;
  1419. btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
  1420. }
  1421. static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
  1422. {
  1423. struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
  1424. struct scrub_ctx *sctx = sbio->sctx;
  1425. int i;
  1426. WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
  1427. if (sbio->err) {
  1428. struct btrfs_dev_replace *dev_replace =
  1429. &sbio->sctx->dev_root->fs_info->dev_replace;
  1430. for (i = 0; i < sbio->page_count; i++) {
  1431. struct scrub_page *spage = sbio->pagev[i];
  1432. spage->io_error = 1;
  1433. btrfs_dev_replace_stats_inc(&dev_replace->
  1434. num_write_errors);
  1435. }
  1436. }
  1437. for (i = 0; i < sbio->page_count; i++)
  1438. scrub_page_put(sbio->pagev[i]);
  1439. bio_put(sbio->bio);
  1440. kfree(sbio);
  1441. scrub_pending_bio_dec(sctx);
  1442. }
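/*
 * Verify the checksum of a completely read scrub_block. The extent flags
 * of the first page decide whether the block is treated as data, as a
 * tree block or as a super block; a non-zero return value means the block
 * is bad and triggers scrub_handle_errored_block().
 */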
  1443. static int scrub_checksum(struct scrub_block *sblock)
  1444. {
  1445. u64 flags;
  1446. int ret;
  1447. WARN_ON(sblock->page_count < 1);
  1448. flags = sblock->pagev[0]->flags;
  1449. ret = 0;
  1450. if (flags & BTRFS_EXTENT_FLAG_DATA)
  1451. ret = scrub_checksum_data(sblock);
  1452. else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
  1453. ret = scrub_checksum_tree_block(sblock);
  1454. else if (flags & BTRFS_EXTENT_FLAG_SUPER)
  1455. (void)scrub_checksum_super(sblock);
  1456. else
  1457. WARN_ON(1);
  1458. if (ret)
  1459. scrub_handle_errored_block(sblock);
  1460. return ret;
  1461. }
  1462. static int scrub_checksum_data(struct scrub_block *sblock)
  1463. {
  1464. struct scrub_ctx *sctx = sblock->sctx;
  1465. u8 csum[BTRFS_CSUM_SIZE];
  1466. u8 *on_disk_csum;
  1467. struct page *page;
  1468. void *buffer;
  1469. u32 crc = ~(u32)0;
  1470. int fail = 0;
  1471. u64 len;
  1472. int index;
  1473. BUG_ON(sblock->page_count < 1);
  1474. if (!sblock->pagev[0]->have_csum)
  1475. return 0;
  1476. on_disk_csum = sblock->pagev[0]->csum;
  1477. page = sblock->pagev[0]->page;
  1478. buffer = kmap_atomic(page);
  1479. len = sctx->sectorsize;
  1480. index = 0;
  1481. for (;;) {
  1482. u64 l = min_t(u64, len, PAGE_SIZE);
  1483. crc = btrfs_csum_data(buffer, crc, l);
  1484. kunmap_atomic(buffer);
  1485. len -= l;
  1486. if (len == 0)
  1487. break;
  1488. index++;
  1489. BUG_ON(index >= sblock->page_count);
  1490. BUG_ON(!sblock->pagev[index]->page);
  1491. page = sblock->pagev[index]->page;
  1492. buffer = kmap_atomic(page);
  1493. }
  1494. btrfs_csum_final(crc, csum);
  1495. if (memcmp(csum, on_disk_csum, sctx->csum_size))
  1496. fail = 1;
  1497. return fail;
  1498. }
  1499. static int scrub_checksum_tree_block(struct scrub_block *sblock)
  1500. {
  1501. struct scrub_ctx *sctx = sblock->sctx;
  1502. struct btrfs_header *h;
  1503. struct btrfs_root *root = sctx->dev_root;
  1504. struct btrfs_fs_info *fs_info = root->fs_info;
  1505. u8 calculated_csum[BTRFS_CSUM_SIZE];
  1506. u8 on_disk_csum[BTRFS_CSUM_SIZE];
  1507. struct page *page;
  1508. void *mapped_buffer;
  1509. u64 mapped_size;
  1510. void *p;
  1511. u32 crc = ~(u32)0;
  1512. int fail = 0;
  1513. int crc_fail = 0;
  1514. u64 len;
  1515. int index;
  1516. BUG_ON(sblock->page_count < 1);
  1517. page = sblock->pagev[0]->page;
  1518. mapped_buffer = kmap_atomic(page);
  1519. h = (struct btrfs_header *)mapped_buffer;
  1520. memcpy(on_disk_csum, h->csum, sctx->csum_size);
  1521. /*
  1522. * we don't use the getter functions here, as we
  1523. * a) don't have an extent buffer and
  1524. * b) the page is already kmapped
  1525. */
  1526. if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
  1527. ++fail;
  1528. if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
  1529. ++fail;
  1530. if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
  1531. ++fail;
  1532. if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
  1533. BTRFS_UUID_SIZE))
  1534. ++fail;
  1535. WARN_ON(sctx->nodesize != sctx->leafsize);
  1536. len = sctx->nodesize - BTRFS_CSUM_SIZE;
  1537. mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
  1538. p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
  1539. index = 0;
  1540. for (;;) {
  1541. u64 l = min_t(u64, len, mapped_size);
  1542. crc = btrfs_csum_data(p, crc, l);
  1543. kunmap_atomic(mapped_buffer);
  1544. len -= l;
  1545. if (len == 0)
  1546. break;
  1547. index++;
  1548. BUG_ON(index >= sblock->page_count);
  1549. BUG_ON(!sblock->pagev[index]->page);
  1550. page = sblock->pagev[index]->page;
  1551. mapped_buffer = kmap_atomic(page);
  1552. mapped_size = PAGE_SIZE;
  1553. p = mapped_buffer;
  1554. }
  1555. btrfs_csum_final(crc, calculated_csum);
  1556. if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
  1557. ++crc_fail;
  1558. return fail || crc_fail;
  1559. }
  1560. static int scrub_checksum_super(struct scrub_block *sblock)
  1561. {
  1562. struct btrfs_super_block *s;
  1563. struct scrub_ctx *sctx = sblock->sctx;
  1564. struct btrfs_root *root = sctx->dev_root;
  1565. struct btrfs_fs_info *fs_info = root->fs_info;
  1566. u8 calculated_csum[BTRFS_CSUM_SIZE];
  1567. u8 on_disk_csum[BTRFS_CSUM_SIZE];
  1568. struct page *page;
  1569. void *mapped_buffer;
  1570. u64 mapped_size;
  1571. void *p;
  1572. u32 crc = ~(u32)0;
  1573. int fail_gen = 0;
  1574. int fail_cor = 0;
  1575. u64 len;
  1576. int index;
  1577. BUG_ON(sblock->page_count < 1);
  1578. page = sblock->pagev[0]->page;
  1579. mapped_buffer = kmap_atomic(page);
  1580. s = (struct btrfs_super_block *)mapped_buffer;
  1581. memcpy(on_disk_csum, s->csum, sctx->csum_size);
  1582. if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
  1583. ++fail_cor;
  1584. if (sblock->pagev[0]->generation != btrfs_super_generation(s))
  1585. ++fail_gen;
  1586. if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
  1587. ++fail_cor;
  1588. len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
  1589. mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
  1590. p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
  1591. index = 0;
  1592. for (;;) {
  1593. u64 l = min_t(u64, len, mapped_size);
  1594. crc = btrfs_csum_data(p, crc, l);
  1595. kunmap_atomic(mapped_buffer);
  1596. len -= l;
  1597. if (len == 0)
  1598. break;
  1599. index++;
  1600. BUG_ON(index >= sblock->page_count);
  1601. BUG_ON(!sblock->pagev[index]->page);
  1602. page = sblock->pagev[index]->page;
  1603. mapped_buffer = kmap_atomic(page);
  1604. mapped_size = PAGE_SIZE;
  1605. p = mapped_buffer;
  1606. }
  1607. btrfs_csum_final(crc, calculated_csum);
  1608. if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
  1609. ++fail_cor;
  1610. if (fail_cor + fail_gen) {
  1611. /*
1612. * if we find an error in a super block, we just report it;
1613. * the super blocks get rewritten with the next transaction
1614. * commit anyway
  1615. */
  1616. spin_lock(&sctx->stat_lock);
  1617. ++sctx->stat.super_errors;
  1618. spin_unlock(&sctx->stat_lock);
  1619. if (fail_cor)
  1620. btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
  1621. BTRFS_DEV_STAT_CORRUPTION_ERRS);
  1622. else
  1623. btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
  1624. BTRFS_DEV_STAT_GENERATION_ERRS);
  1625. }
  1626. return fail_cor + fail_gen;
  1627. }
  1628. static void scrub_block_get(struct scrub_block *sblock)
  1629. {
  1630. atomic_inc(&sblock->ref_count);
  1631. }
  1632. static void scrub_block_put(struct scrub_block *sblock)
  1633. {
  1634. if (atomic_dec_and_test(&sblock->ref_count)) {
  1635. int i;
  1636. for (i = 0; i < sblock->page_count; i++)
  1637. scrub_page_put(sblock->pagev[i]);
  1638. kfree(sblock);
  1639. }
  1640. }
  1641. static void scrub_page_get(struct scrub_page *spage)
  1642. {
  1643. atomic_inc(&spage->ref_count);
  1644. }
  1645. static void scrub_page_put(struct scrub_page *spage)
  1646. {
  1647. if (atomic_dec_and_test(&spage->ref_count)) {
  1648. if (spage->page)
  1649. __free_page(spage->page);
  1650. kfree(spage);
  1651. }
  1652. }
  1653. static void scrub_submit(struct scrub_ctx *sctx)
  1654. {
  1655. struct scrub_bio *sbio;
  1656. if (sctx->curr == -1)
  1657. return;
  1658. sbio = sctx->bios[sctx->curr];
  1659. sctx->curr = -1;
  1660. scrub_pending_bio_inc(sctx);
  1661. if (!sbio->bio->bi_bdev) {
  1662. /*
  1663. * this case should not happen. If btrfs_map_block() is
  1664. * wrong, it could happen for dev-replace operations on
  1665. * missing devices when no mirrors are available, but in
  1666. * this case it should already fail the mount.
  1667. * This case is handled correctly (but _very_ slowly).
  1668. */
  1669. printk_ratelimited(KERN_WARNING
  1670. "btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
  1671. bio_endio(sbio->bio, -EIO);
  1672. } else {
  1673. btrfsic_submit_bio(READ, sbio->bio);
  1674. }
  1675. }
  1676. static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
  1677. struct scrub_page *spage)
  1678. {
  1679. struct scrub_block *sblock = spage->sblock;
  1680. struct scrub_bio *sbio;
  1681. int ret;
  1682. again:
  1683. /*
  1684. * grab a fresh bio or wait for one to become available
  1685. */
  1686. while (sctx->curr == -1) {
  1687. spin_lock(&sctx->list_lock);
  1688. sctx->curr = sctx->first_free;
  1689. if (sctx->curr != -1) {
  1690. sctx->first_free = sctx->bios[sctx->curr]->next_free;
  1691. sctx->bios[sctx->curr]->next_free = -1;
  1692. sctx->bios[sctx->curr]->page_count = 0;
  1693. spin_unlock(&sctx->list_lock);
  1694. } else {
  1695. spin_unlock(&sctx->list_lock);
  1696. wait_event(sctx->list_wait, sctx->first_free != -1);
  1697. }
  1698. }
  1699. sbio = sctx->bios[sctx->curr];
  1700. if (sbio->page_count == 0) {
  1701. struct bio *bio;
  1702. sbio->physical = spage->physical;
  1703. sbio->logical = spage->logical;
  1704. sbio->dev = spage->dev;
  1705. bio = sbio->bio;
  1706. if (!bio) {
  1707. bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
  1708. if (!bio)
  1709. return -ENOMEM;
  1710. sbio->bio = bio;
  1711. }
  1712. bio->bi_private = sbio;
  1713. bio->bi_end_io = scrub_bio_end_io;
  1714. bio->bi_bdev = sbio->dev->bdev;
  1715. bio->bi_sector = sbio->physical >> 9;
  1716. sbio->err = 0;
  1717. } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
  1718. spage->physical ||
  1719. sbio->logical + sbio->page_count * PAGE_SIZE !=
  1720. spage->logical ||
  1721. sbio->dev != spage->dev) {
  1722. scrub_submit(sctx);
  1723. goto again;
  1724. }
  1725. sbio->pagev[sbio->page_count] = spage;
  1726. ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
  1727. if (ret != PAGE_SIZE) {
  1728. if (sbio->page_count < 1) {
  1729. bio_put(sbio->bio);
  1730. sbio->bio = NULL;
  1731. return -EIO;
  1732. }
  1733. scrub_submit(sctx);
  1734. goto again;
  1735. }
  1736. scrub_block_get(sblock); /* one for the page added to the bio */
  1737. atomic_inc(&sblock->outstanding_pages);
  1738. sbio->page_count++;
  1739. if (sbio->page_count == sctx->pages_per_rd_bio)
  1740. scrub_submit(sctx);
  1741. return 0;
  1742. }
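/*
 * Split a chunk of an extent into PAGE_SIZE scrub_pages, group them into
 * one scrub_block and queue every page for reading. If @force is set, the
 * current read bio is submitted immediately instead of waiting until it
 * is full.
 */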
  1743. static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
  1744. u64 physical, struct btrfs_device *dev, u64 flags,
  1745. u64 gen, int mirror_num, u8 *csum, int force,
  1746. u64 physical_for_dev_replace)
  1747. {
  1748. struct scrub_block *sblock;
  1749. int index;
  1750. sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
  1751. if (!sblock) {
  1752. spin_lock(&sctx->stat_lock);
  1753. sctx->stat.malloc_errors++;
  1754. spin_unlock(&sctx->stat_lock);
  1755. return -ENOMEM;
  1756. }
  1757. /* one ref inside this function, plus one for each page added to
  1758. * a bio later on */
  1759. atomic_set(&sblock->ref_count, 1);
  1760. sblock->sctx = sctx;
  1761. sblock->no_io_error_seen = 1;
  1762. for (index = 0; len > 0; index++) {
  1763. struct scrub_page *spage;
  1764. u64 l = min_t(u64, len, PAGE_SIZE);
  1765. spage = kzalloc(sizeof(*spage), GFP_NOFS);
  1766. if (!spage) {
  1767. leave_nomem:
  1768. spin_lock(&sctx->stat_lock);
  1769. sctx->stat.malloc_errors++;
  1770. spin_unlock(&sctx->stat_lock);
  1771. scrub_block_put(sblock);
  1772. return -ENOMEM;
  1773. }
  1774. BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
  1775. scrub_page_get(spage);
  1776. sblock->pagev[index] = spage;
  1777. spage->sblock = sblock;
  1778. spage->dev = dev;
  1779. spage->flags = flags;
  1780. spage->generation = gen;
  1781. spage->logical = logical;
  1782. spage->physical = physical;
  1783. spage->physical_for_dev_replace = physical_for_dev_replace;
  1784. spage->mirror_num = mirror_num;
  1785. if (csum) {
  1786. spage->have_csum = 1;
  1787. memcpy(spage->csum, csum, sctx->csum_size);
  1788. } else {
  1789. spage->have_csum = 0;
  1790. }
  1791. sblock->page_count++;
  1792. spage->page = alloc_page(GFP_NOFS);
  1793. if (!spage->page)
  1794. goto leave_nomem;
  1795. len -= l;
  1796. logical += l;
  1797. physical += l;
  1798. physical_for_dev_replace += l;
  1799. }
  1800. WARN_ON(sblock->page_count == 0);
  1801. for (index = 0; index < sblock->page_count; index++) {
  1802. struct scrub_page *spage = sblock->pagev[index];
  1803. int ret;
  1804. ret = scrub_add_page_to_rd_bio(sctx, spage);
  1805. if (ret) {
  1806. scrub_block_put(sblock);
  1807. return ret;
  1808. }
  1809. }
  1810. if (force)
  1811. scrub_submit(sctx);
  1812. /* last one frees, either here or in bio completion for last page */
  1813. scrub_block_put(sblock);
  1814. return 0;
  1815. }
  1816. static void scrub_bio_end_io(struct bio *bio, int err)
  1817. {
  1818. struct scrub_bio *sbio = bio->bi_private;
  1819. struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
  1820. sbio->err = err;
  1821. sbio->bio = bio;
  1822. btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
  1823. }
  1824. static void scrub_bio_end_io_worker(struct btrfs_work *work)
  1825. {
  1826. struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
  1827. struct scrub_ctx *sctx = sbio->sctx;
  1828. int i;
  1829. BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
  1830. if (sbio->err) {
  1831. for (i = 0; i < sbio->page_count; i++) {
  1832. struct scrub_page *spage = sbio->pagev[i];
  1833. spage->io_error = 1;
  1834. spage->sblock->no_io_error_seen = 0;
  1835. }
  1836. }
  1837. /* now complete the scrub_block items that have all pages completed */
  1838. for (i = 0; i < sbio->page_count; i++) {
  1839. struct scrub_page *spage = sbio->pagev[i];
  1840. struct scrub_block *sblock = spage->sblock;
  1841. if (atomic_dec_and_test(&sblock->outstanding_pages))
  1842. scrub_block_complete(sblock);
  1843. scrub_block_put(sblock);
  1844. }
  1845. bio_put(sbio->bio);
  1846. sbio->bio = NULL;
  1847. spin_lock(&sctx->list_lock);
  1848. sbio->next_free = sctx->first_free;
  1849. sctx->first_free = sbio->index;
  1850. spin_unlock(&sctx->list_lock);
  1851. if (sctx->is_dev_replace &&
  1852. atomic_read(&sctx->wr_ctx.flush_all_writes)) {
  1853. mutex_lock(&sctx->wr_ctx.wr_lock);
  1854. scrub_wr_submit(sctx);
  1855. mutex_unlock(&sctx->wr_ctx.wr_lock);
  1856. }
  1857. scrub_pending_bio_dec(sctx);
  1858. }
  1859. static void scrub_block_complete(struct scrub_block *sblock)
  1860. {
  1861. if (!sblock->no_io_error_seen) {
  1862. scrub_handle_errored_block(sblock);
  1863. } else {
  1864. /*
1865. * in the dev replace case: if the block has a checksum
1866. * error, it is written via the repair mechanism; otherwise
1867. * it is written to the target device right here.
  1868. */
  1869. if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
  1870. scrub_write_block_to_dev_replace(sblock);
  1871. }
  1872. }
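/*
 * Look up the data checksum for @logical in the csum_list that was filled
 * by btrfs_lookup_csums_range(). Entries that end before @logical are
 * dropped (and counted as csum_discards); on a hit the csum is copied to
 * @csum and 1 is returned, otherwise 0.
 */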
  1873. static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
  1874. u8 *csum)
  1875. {
  1876. struct btrfs_ordered_sum *sum = NULL;
  1877. unsigned long index;
  1878. unsigned long num_sectors;
  1879. while (!list_empty(&sctx->csum_list)) {
  1880. sum = list_first_entry(&sctx->csum_list,
  1881. struct btrfs_ordered_sum, list);
  1882. if (sum->bytenr > logical)
  1883. return 0;
  1884. if (sum->bytenr + sum->len > logical)
  1885. break;
  1886. ++sctx->stat.csum_discards;
  1887. list_del(&sum->list);
  1888. kfree(sum);
  1889. sum = NULL;
  1890. }
  1891. if (!sum)
  1892. return 0;
  1893. index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
  1894. num_sectors = sum->len / sctx->sectorsize;
  1895. memcpy(csum, sum->sums + index, sctx->csum_size);
  1896. if (index == num_sectors - 1) {
  1897. list_del(&sum->list);
  1898. kfree(sum);
  1899. }
  1900. return 1;
  1901. }
  1902. /* scrub extent tries to collect up to 64 kB for each bio */
  1903. static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
  1904. u64 physical, struct btrfs_device *dev, u64 flags,
  1905. u64 gen, int mirror_num, u64 physical_for_dev_replace)
  1906. {
  1907. int ret;
  1908. u8 csum[BTRFS_CSUM_SIZE];
  1909. u32 blocksize;
  1910. if (flags & BTRFS_EXTENT_FLAG_DATA) {
  1911. blocksize = sctx->sectorsize;
  1912. spin_lock(&sctx->stat_lock);
  1913. sctx->stat.data_extents_scrubbed++;
  1914. sctx->stat.data_bytes_scrubbed += len;
  1915. spin_unlock(&sctx->stat_lock);
  1916. } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
  1917. WARN_ON(sctx->nodesize != sctx->leafsize);
  1918. blocksize = sctx->nodesize;
  1919. spin_lock(&sctx->stat_lock);
  1920. sctx->stat.tree_extents_scrubbed++;
  1921. sctx->stat.tree_bytes_scrubbed += len;
  1922. spin_unlock(&sctx->stat_lock);
  1923. } else {
  1924. blocksize = sctx->sectorsize;
  1925. WARN_ON(1);
  1926. }
  1927. while (len) {
  1928. u64 l = min_t(u64, len, blocksize);
  1929. int have_csum = 0;
  1930. if (flags & BTRFS_EXTENT_FLAG_DATA) {
  1931. /* push csums to sbio */
  1932. have_csum = scrub_find_csum(sctx, logical, l, csum);
  1933. if (have_csum == 0)
  1934. ++sctx->stat.no_csum;
  1935. if (sctx->is_dev_replace && !have_csum) {
  1936. ret = copy_nocow_pages(sctx, logical, l,
  1937. mirror_num,
  1938. physical_for_dev_replace);
  1939. goto behind_scrub_pages;
  1940. }
  1941. }
  1942. ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
  1943. mirror_num, have_csum ? csum : NULL, 0,
  1944. physical_for_dev_replace);
  1945. behind_scrub_pages:
  1946. if (ret)
  1947. return ret;
  1948. len -= l;
  1949. logical += l;
  1950. physical += l;
  1951. physical_for_dev_replace += l;
  1952. }
  1953. return 0;
  1954. }
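/*
 * Scrub one stripe of a chunk on @scrub_dev. The per-RAID-profile setup
 * below derives, from the stripe index @num, the logical offset of the
 * first stripe, the logical distance between two consecutive stripes on
 * this device (increment) and the mirror number. Illustrative example
 * (made-up numbers, not taken from a real chunk): for RAID10 with
 * num_stripes = 4, sub_stripes = 2 and num = 3, this gives factor = 2,
 * offset = stripe_len * 1, increment = stripe_len * 2 and mirror_num = 2.
 * Each stripe is then walked via the commit root of the extent tree and
 * every extent that overlaps it is scrubbed.
 */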
  1955. static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
  1956. struct map_lookup *map,
  1957. struct btrfs_device *scrub_dev,
  1958. int num, u64 base, u64 length,
  1959. int is_dev_replace)
  1960. {
  1961. struct btrfs_path *path;
  1962. struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
  1963. struct btrfs_root *root = fs_info->extent_root;
  1964. struct btrfs_root *csum_root = fs_info->csum_root;
  1965. struct btrfs_extent_item *extent;
  1966. struct blk_plug plug;
  1967. u64 flags;
  1968. int ret;
  1969. int slot;
  1970. u64 nstripes;
  1971. struct extent_buffer *l;
  1972. struct btrfs_key key;
  1973. u64 physical;
  1974. u64 logical;
  1975. u64 logic_end;
  1976. u64 generation;
  1977. int mirror_num;
  1978. struct reada_control *reada1;
  1979. struct reada_control *reada2;
  1980. struct btrfs_key key_start;
  1981. struct btrfs_key key_end;
  1982. u64 increment = map->stripe_len;
  1983. u64 offset;
  1984. u64 extent_logical;
  1985. u64 extent_physical;
  1986. u64 extent_len;
  1987. struct btrfs_device *extent_dev;
  1988. int extent_mirror_num;
  1989. int stop_loop;
  1990. if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
  1991. BTRFS_BLOCK_GROUP_RAID6)) {
  1992. if (num >= nr_data_stripes(map)) {
  1993. return 0;
  1994. }
  1995. }
  1996. nstripes = length;
  1997. offset = 0;
  1998. do_div(nstripes, map->stripe_len);
  1999. if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
  2000. offset = map->stripe_len * num;
  2001. increment = map->stripe_len * map->num_stripes;
  2002. mirror_num = 1;
  2003. } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
  2004. int factor = map->num_stripes / map->sub_stripes;
  2005. offset = map->stripe_len * (num / map->sub_stripes);
  2006. increment = map->stripe_len * factor;
  2007. mirror_num = num % map->sub_stripes + 1;
  2008. } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
  2009. increment = map->stripe_len;
  2010. mirror_num = num % map->num_stripes + 1;
  2011. } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
  2012. increment = map->stripe_len;
  2013. mirror_num = num % map->num_stripes + 1;
  2014. } else {
  2015. increment = map->stripe_len;
  2016. mirror_num = 1;
  2017. }
  2018. path = btrfs_alloc_path();
  2019. if (!path)
  2020. return -ENOMEM;
  2021. /*
  2022. * work on commit root. The related disk blocks are static as
2023. * long as COW is applied. This means it is safe to rewrite
  2024. * them to repair disk errors without any race conditions
  2025. */
  2026. path->search_commit_root = 1;
  2027. path->skip_locking = 1;
  2028. /*
2029. * trigger the readahead for the extent tree and the csum tree and
2030. * wait for completion. During readahead, the scrub is officially
2031. * paused so that it does not hold off transaction commits
  2032. */
  2033. logical = base + offset;
  2034. wait_event(sctx->list_wait,
  2035. atomic_read(&sctx->bios_in_flight) == 0);
  2036. atomic_inc(&fs_info->scrubs_paused);
  2037. wake_up(&fs_info->scrub_pause_wait);
  2038. /* FIXME it might be better to start readahead at commit root */
  2039. key_start.objectid = logical;
  2040. key_start.type = BTRFS_EXTENT_ITEM_KEY;
  2041. key_start.offset = (u64)0;
  2042. key_end.objectid = base + offset + nstripes * increment;
  2043. key_end.type = BTRFS_METADATA_ITEM_KEY;
  2044. key_end.offset = (u64)-1;
  2045. reada1 = btrfs_reada_add(root, &key_start, &key_end);
  2046. key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
  2047. key_start.type = BTRFS_EXTENT_CSUM_KEY;
  2048. key_start.offset = logical;
  2049. key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
  2050. key_end.type = BTRFS_EXTENT_CSUM_KEY;
  2051. key_end.offset = base + offset + nstripes * increment;
  2052. reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
  2053. if (!IS_ERR(reada1))
  2054. btrfs_reada_wait(reada1);
  2055. if (!IS_ERR(reada2))
  2056. btrfs_reada_wait(reada2);
  2057. mutex_lock(&fs_info->scrub_lock);
  2058. while (atomic_read(&fs_info->scrub_pause_req)) {
  2059. mutex_unlock(&fs_info->scrub_lock);
  2060. wait_event(fs_info->scrub_pause_wait,
  2061. atomic_read(&fs_info->scrub_pause_req) == 0);
  2062. mutex_lock(&fs_info->scrub_lock);
  2063. }
  2064. atomic_dec(&fs_info->scrubs_paused);
  2065. mutex_unlock(&fs_info->scrub_lock);
  2066. wake_up(&fs_info->scrub_pause_wait);
  2067. /*
  2068. * collect all data csums for the stripe to avoid seeking during
2069. * the scrub. This might currently (crc32) end up being about 1MB
  2070. */
  2071. blk_start_plug(&plug);
  2072. /*
  2073. * now find all extents for each stripe and scrub them
  2074. */
  2075. logical = base + offset;
  2076. physical = map->stripes[num].physical;
  2077. logic_end = logical + increment * nstripes;
  2078. ret = 0;
  2079. while (logical < logic_end) {
  2080. /*
  2081. * canceled?
  2082. */
  2083. if (atomic_read(&fs_info->scrub_cancel_req) ||
  2084. atomic_read(&sctx->cancel_req)) {
  2085. ret = -ECANCELED;
  2086. goto out;
  2087. }
  2088. /*
  2089. * check to see if we have to pause
  2090. */
  2091. if (atomic_read(&fs_info->scrub_pause_req)) {
  2092. /* push queued extents */
  2093. atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
  2094. scrub_submit(sctx);
  2095. mutex_lock(&sctx->wr_ctx.wr_lock);
  2096. scrub_wr_submit(sctx);
  2097. mutex_unlock(&sctx->wr_ctx.wr_lock);
  2098. wait_event(sctx->list_wait,
  2099. atomic_read(&sctx->bios_in_flight) == 0);
  2100. atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
  2101. atomic_inc(&fs_info->scrubs_paused);
  2102. wake_up(&fs_info->scrub_pause_wait);
  2103. mutex_lock(&fs_info->scrub_lock);
  2104. while (atomic_read(&fs_info->scrub_pause_req)) {
  2105. mutex_unlock(&fs_info->scrub_lock);
  2106. wait_event(fs_info->scrub_pause_wait,
  2107. atomic_read(&fs_info->scrub_pause_req) == 0);
  2108. mutex_lock(&fs_info->scrub_lock);
  2109. }
  2110. atomic_dec(&fs_info->scrubs_paused);
  2111. mutex_unlock(&fs_info->scrub_lock);
  2112. wake_up(&fs_info->scrub_pause_wait);
  2113. }
  2114. key.objectid = logical;
  2115. key.type = BTRFS_EXTENT_ITEM_KEY;
  2116. key.offset = (u64)-1;
  2117. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  2118. if (ret < 0)
  2119. goto out;
  2120. if (ret > 0) {
  2121. ret = btrfs_previous_item(root, path, 0,
  2122. BTRFS_EXTENT_ITEM_KEY);
  2123. if (ret < 0)
  2124. goto out;
  2125. if (ret > 0) {
  2126. /* there's no smaller item, so stick with the
  2127. * larger one */
  2128. btrfs_release_path(path);
  2129. ret = btrfs_search_slot(NULL, root, &key,
  2130. path, 0, 0);
  2131. if (ret < 0)
  2132. goto out;
  2133. }
  2134. }
  2135. stop_loop = 0;
  2136. while (1) {
  2137. u64 bytes;
  2138. l = path->nodes[0];
  2139. slot = path->slots[0];
  2140. if (slot >= btrfs_header_nritems(l)) {
  2141. ret = btrfs_next_leaf(root, path);
  2142. if (ret == 0)
  2143. continue;
  2144. if (ret < 0)
  2145. goto out;
  2146. stop_loop = 1;
  2147. break;
  2148. }
  2149. btrfs_item_key_to_cpu(l, &key, slot);
  2150. if (key.type == BTRFS_METADATA_ITEM_KEY)
  2151. bytes = root->leafsize;
  2152. else
  2153. bytes = key.offset;
  2154. if (key.objectid + bytes <= logical)
  2155. goto next;
  2156. if (key.type != BTRFS_EXTENT_ITEM_KEY &&
  2157. key.type != BTRFS_METADATA_ITEM_KEY)
  2158. goto next;
  2159. if (key.objectid >= logical + map->stripe_len) {
  2160. /* out of this device extent */
  2161. if (key.objectid >= logic_end)
  2162. stop_loop = 1;
  2163. break;
  2164. }
  2165. extent = btrfs_item_ptr(l, slot,
  2166. struct btrfs_extent_item);
  2167. flags = btrfs_extent_flags(l, extent);
  2168. generation = btrfs_extent_generation(l, extent);
  2169. if (key.objectid < logical &&
  2170. (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
  2171. printk(KERN_ERR
  2172. "btrfs scrub: tree block %llu spanning "
  2173. "stripes, ignored. logical=%llu\n",
  2174. (unsigned long long)key.objectid,
  2175. (unsigned long long)logical);
  2176. goto next;
  2177. }
  2178. again:
  2179. extent_logical = key.objectid;
  2180. extent_len = bytes;
  2181. /*
  2182. * trim extent to this stripe
  2183. */
  2184. if (extent_logical < logical) {
  2185. extent_len -= logical - extent_logical;
  2186. extent_logical = logical;
  2187. }
  2188. if (extent_logical + extent_len >
  2189. logical + map->stripe_len) {
  2190. extent_len = logical + map->stripe_len -
  2191. extent_logical;
  2192. }
  2193. extent_physical = extent_logical - logical + physical;
  2194. extent_dev = scrub_dev;
  2195. extent_mirror_num = mirror_num;
  2196. if (is_dev_replace)
  2197. scrub_remap_extent(fs_info, extent_logical,
  2198. extent_len, &extent_physical,
  2199. &extent_dev,
  2200. &extent_mirror_num);
  2201. ret = btrfs_lookup_csums_range(csum_root, logical,
  2202. logical + map->stripe_len - 1,
  2203. &sctx->csum_list, 1);
  2204. if (ret)
  2205. goto out;
  2206. ret = scrub_extent(sctx, extent_logical, extent_len,
  2207. extent_physical, extent_dev, flags,
  2208. generation, extent_mirror_num,
  2209. extent_logical - logical + physical);
  2210. if (ret)
  2211. goto out;
  2212. scrub_free_csums(sctx);
  2213. if (extent_logical + extent_len <
  2214. key.objectid + bytes) {
  2215. logical += increment;
  2216. physical += map->stripe_len;
  2217. if (logical < key.objectid + bytes) {
  2218. cond_resched();
  2219. goto again;
  2220. }
  2221. if (logical >= logic_end) {
  2222. stop_loop = 1;
  2223. break;
  2224. }
  2225. }
  2226. next:
  2227. path->slots[0]++;
  2228. }
  2229. btrfs_release_path(path);
  2230. logical += increment;
  2231. physical += map->stripe_len;
  2232. spin_lock(&sctx->stat_lock);
  2233. if (stop_loop)
  2234. sctx->stat.last_physical = map->stripes[num].physical +
  2235. length;
  2236. else
  2237. sctx->stat.last_physical = physical;
  2238. spin_unlock(&sctx->stat_lock);
  2239. if (stop_loop)
  2240. break;
  2241. }
  2242. out:
  2243. /* push queued extents */
  2244. scrub_submit(sctx);
  2245. mutex_lock(&sctx->wr_ctx.wr_lock);
  2246. scrub_wr_submit(sctx);
  2247. mutex_unlock(&sctx->wr_ctx.wr_lock);
  2248. blk_finish_plug(&plug);
  2249. btrfs_free_path(path);
  2250. return ret < 0 ? ret : 0;
  2251. }
  2252. static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
  2253. struct btrfs_device *scrub_dev,
  2254. u64 chunk_tree, u64 chunk_objectid,
  2255. u64 chunk_offset, u64 length,
  2256. u64 dev_offset, int is_dev_replace)
  2257. {
  2258. struct btrfs_mapping_tree *map_tree =
  2259. &sctx->dev_root->fs_info->mapping_tree;
  2260. struct map_lookup *map;
  2261. struct extent_map *em;
  2262. int i;
  2263. int ret = 0;
  2264. read_lock(&map_tree->map_tree.lock);
  2265. em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
  2266. read_unlock(&map_tree->map_tree.lock);
  2267. if (!em)
  2268. return -EINVAL;
  2269. map = (struct map_lookup *)em->bdev;
  2270. if (em->start != chunk_offset)
  2271. goto out;
  2272. if (em->len < length)
  2273. goto out;
  2274. for (i = 0; i < map->num_stripes; ++i) {
  2275. if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
  2276. map->stripes[i].physical == dev_offset) {
  2277. ret = scrub_stripe(sctx, map, scrub_dev, i,
  2278. chunk_offset, length,
  2279. is_dev_replace);
  2280. if (ret)
  2281. goto out;
  2282. }
  2283. }
  2284. out:
  2285. free_extent_map(em);
  2286. return ret;
  2287. }
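/*
 * Walk all dev extents of @scrub_dev between @start and @end in the device
 * tree, take a reference on the corresponding block group and scrub each
 * chunk in turn. Between chunks all pending read and write bios are
 * flushed and the scrub honors pause requests.
 */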
  2288. static noinline_for_stack
  2289. int scrub_enumerate_chunks(struct scrub_ctx *sctx,
  2290. struct btrfs_device *scrub_dev, u64 start, u64 end,
  2291. int is_dev_replace)
  2292. {
  2293. struct btrfs_dev_extent *dev_extent = NULL;
  2294. struct btrfs_path *path;
  2295. struct btrfs_root *root = sctx->dev_root;
  2296. struct btrfs_fs_info *fs_info = root->fs_info;
  2297. u64 length;
  2298. u64 chunk_tree;
  2299. u64 chunk_objectid;
  2300. u64 chunk_offset;
  2301. int ret;
  2302. int slot;
  2303. struct extent_buffer *l;
  2304. struct btrfs_key key;
  2305. struct btrfs_key found_key;
  2306. struct btrfs_block_group_cache *cache;
  2307. struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
  2308. path = btrfs_alloc_path();
  2309. if (!path)
  2310. return -ENOMEM;
  2311. path->reada = 2;
  2312. path->search_commit_root = 1;
  2313. path->skip_locking = 1;
  2314. key.objectid = scrub_dev->devid;
  2315. key.offset = 0ull;
  2316. key.type = BTRFS_DEV_EXTENT_KEY;
  2317. while (1) {
  2318. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  2319. if (ret < 0)
  2320. break;
  2321. if (ret > 0) {
  2322. if (path->slots[0] >=
  2323. btrfs_header_nritems(path->nodes[0])) {
  2324. ret = btrfs_next_leaf(root, path);
  2325. if (ret)
  2326. break;
  2327. }
  2328. }
  2329. l = path->nodes[0];
  2330. slot = path->slots[0];
  2331. btrfs_item_key_to_cpu(l, &found_key, slot);
  2332. if (found_key.objectid != scrub_dev->devid)
  2333. break;
  2334. if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
  2335. break;
  2336. if (found_key.offset >= end)
  2337. break;
  2338. if (found_key.offset < key.offset)
  2339. break;
  2340. dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
  2341. length = btrfs_dev_extent_length(l, dev_extent);
  2342. if (found_key.offset + length <= start) {
  2343. key.offset = found_key.offset + length;
  2344. btrfs_release_path(path);
  2345. continue;
  2346. }
  2347. chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
  2348. chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
  2349. chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
  2350. /*
  2351. * get a reference on the corresponding block group to prevent
  2352. * the chunk from going away while we scrub it
  2353. */
  2354. cache = btrfs_lookup_block_group(fs_info, chunk_offset);
  2355. if (!cache) {
  2356. ret = -ENOENT;
  2357. break;
  2358. }
  2359. dev_replace->cursor_right = found_key.offset + length;
  2360. dev_replace->cursor_left = found_key.offset;
  2361. dev_replace->item_needs_writeback = 1;
  2362. ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
  2363. chunk_offset, length, found_key.offset,
  2364. is_dev_replace);
  2365. /*
2366. * flush: submit all pending read and write bios, then wait
2367. * for them.
  2368. * Note that in the dev replace case, a read request causes
  2369. * write requests that are submitted in the read completion
  2370. * worker. Therefore in the current situation, it is required
  2371. * that all write requests are flushed, so that all read and
  2372. * write requests are really completed when bios_in_flight
  2373. * changes to 0.
  2374. */
  2375. atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
  2376. scrub_submit(sctx);
  2377. mutex_lock(&sctx->wr_ctx.wr_lock);
  2378. scrub_wr_submit(sctx);
  2379. mutex_unlock(&sctx->wr_ctx.wr_lock);
  2380. wait_event(sctx->list_wait,
  2381. atomic_read(&sctx->bios_in_flight) == 0);
  2382. atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
  2383. atomic_inc(&fs_info->scrubs_paused);
  2384. wake_up(&fs_info->scrub_pause_wait);
  2385. wait_event(sctx->list_wait,
  2386. atomic_read(&sctx->workers_pending) == 0);
  2387. mutex_lock(&fs_info->scrub_lock);
  2388. while (atomic_read(&fs_info->scrub_pause_req)) {
  2389. mutex_unlock(&fs_info->scrub_lock);
  2390. wait_event(fs_info->scrub_pause_wait,
  2391. atomic_read(&fs_info->scrub_pause_req) == 0);
  2392. mutex_lock(&fs_info->scrub_lock);
  2393. }
  2394. atomic_dec(&fs_info->scrubs_paused);
  2395. mutex_unlock(&fs_info->scrub_lock);
  2396. wake_up(&fs_info->scrub_pause_wait);
  2397. dev_replace->cursor_left = dev_replace->cursor_right;
  2398. dev_replace->item_needs_writeback = 1;
  2399. btrfs_put_block_group(cache);
  2400. if (ret)
  2401. break;
  2402. if (is_dev_replace &&
  2403. atomic64_read(&dev_replace->num_write_errors) > 0) {
  2404. ret = -EIO;
  2405. break;
  2406. }
  2407. if (sctx->stat.malloc_errors > 0) {
  2408. ret = -ENOMEM;
  2409. break;
  2410. }
  2411. key.offset = found_key.offset + length;
  2412. btrfs_release_path(path);
  2413. }
  2414. btrfs_free_path(path);
  2415. /*
  2416. * ret can still be 1 from search_slot or next_leaf,
  2417. * that's not an error
  2418. */
  2419. return ret < 0 ? ret : 0;
  2420. }
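/*
 * Scrub all super block copies on @scrub_dev that fit into the device,
 * using the generation of the last committed transaction for validation.
 */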
  2421. static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
  2422. struct btrfs_device *scrub_dev)
  2423. {
  2424. int i;
  2425. u64 bytenr;
  2426. u64 gen;
  2427. int ret;
  2428. struct btrfs_root *root = sctx->dev_root;
  2429. if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
  2430. return -EIO;
  2431. gen = root->fs_info->last_trans_committed;
  2432. for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
  2433. bytenr = btrfs_sb_offset(i);
  2434. if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
  2435. break;
  2436. ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
  2437. scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
  2438. NULL, 1, bytenr);
  2439. if (ret)
  2440. return ret;
  2441. }
  2442. wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
  2443. return 0;
  2444. }
  2445. /*
2446. * get a reference count on fs_info->scrub_workers; start the workers if necessary
  2447. */
  2448. static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
  2449. int is_dev_replace)
  2450. {
  2451. int ret = 0;
  2452. mutex_lock(&fs_info->scrub_lock);
  2453. if (fs_info->scrub_workers_refcnt == 0) {
  2454. if (is_dev_replace)
  2455. btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
  2456. &fs_info->generic_worker);
  2457. else
  2458. btrfs_init_workers(&fs_info->scrub_workers, "scrub",
  2459. fs_info->thread_pool_size,
  2460. &fs_info->generic_worker);
  2461. fs_info->scrub_workers.idle_thresh = 4;
  2462. ret = btrfs_start_workers(&fs_info->scrub_workers);
  2463. if (ret)
  2464. goto out;
  2465. btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
  2466. "scrubwrc",
  2467. fs_info->thread_pool_size,
  2468. &fs_info->generic_worker);
  2469. fs_info->scrub_wr_completion_workers.idle_thresh = 2;
  2470. ret = btrfs_start_workers(
  2471. &fs_info->scrub_wr_completion_workers);
  2472. if (ret)
  2473. goto out;
  2474. btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
  2475. &fs_info->generic_worker);
  2476. ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
  2477. if (ret)
  2478. goto out;
  2479. }
  2480. ++fs_info->scrub_workers_refcnt;
  2481. out:
  2482. mutex_unlock(&fs_info->scrub_lock);
  2483. return ret;
  2484. }
  2485. static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
  2486. {
  2487. mutex_lock(&fs_info->scrub_lock);
  2488. if (--fs_info->scrub_workers_refcnt == 0) {
  2489. btrfs_stop_workers(&fs_info->scrub_workers);
  2490. btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
  2491. btrfs_stop_workers(&fs_info->scrub_nocow_workers);
  2492. }
  2493. WARN_ON(fs_info->scrub_workers_refcnt < 0);
  2494. mutex_unlock(&fs_info->scrub_lock);
  2495. }
  2496. int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
  2497. u64 end, struct btrfs_scrub_progress *progress,
  2498. int readonly, int is_dev_replace)
  2499. {
  2500. struct scrub_ctx *sctx;
  2501. int ret;
  2502. struct btrfs_device *dev;
  2503. if (btrfs_fs_closing(fs_info))
  2504. return -EINVAL;
  2505. /*
  2506. * check some assumptions
  2507. */
  2508. if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
  2509. printk(KERN_ERR
  2510. "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
  2511. fs_info->chunk_root->nodesize,
  2512. fs_info->chunk_root->leafsize);
  2513. return -EINVAL;
  2514. }
  2515. if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
  2516. /*
2517. * the way scrub is implemented, it is unable to calculate
2518. * the checksum in this case. Do not handle this situation
2519. * at all because it won't ever happen.
  2520. */
  2521. printk(KERN_ERR
  2522. "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
  2523. fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
  2524. return -EINVAL;
  2525. }
  2526. if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
  2527. /* not supported for data w/o checksums */
  2528. printk(KERN_ERR
  2529. "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
  2530. fs_info->chunk_root->sectorsize,
  2531. (unsigned long long)PAGE_SIZE);
  2532. return -EINVAL;
  2533. }
  2534. if (fs_info->chunk_root->nodesize >
  2535. PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
  2536. fs_info->chunk_root->sectorsize >
  2537. PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
  2538. /*
  2539. * would exhaust the array bounds of pagev member in
  2540. * struct scrub_block
  2541. */
  2542. pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
  2543. fs_info->chunk_root->nodesize,
  2544. SCRUB_MAX_PAGES_PER_BLOCK,
  2545. fs_info->chunk_root->sectorsize,
  2546. SCRUB_MAX_PAGES_PER_BLOCK);
  2547. return -EINVAL;
  2548. }
  2549. ret = scrub_workers_get(fs_info, is_dev_replace);
  2550. if (ret)
  2551. return ret;
  2552. mutex_lock(&fs_info->fs_devices->device_list_mutex);
  2553. dev = btrfs_find_device(fs_info, devid, NULL, NULL);
  2554. if (!dev || (dev->missing && !is_dev_replace)) {
  2555. mutex_unlock(&fs_info->fs_devices->device_list_mutex);
  2556. scrub_workers_put(fs_info);
  2557. return -ENODEV;
  2558. }
  2559. mutex_lock(&fs_info->scrub_lock);
  2560. if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
  2561. mutex_unlock(&fs_info->scrub_lock);
  2562. mutex_unlock(&fs_info->fs_devices->device_list_mutex);
  2563. scrub_workers_put(fs_info);
  2564. return -EIO;
  2565. }
  2566. btrfs_dev_replace_lock(&fs_info->dev_replace);
  2567. if (dev->scrub_device ||
  2568. (!is_dev_replace &&
  2569. btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
  2570. btrfs_dev_replace_unlock(&fs_info->dev_replace);
  2571. mutex_unlock(&fs_info->scrub_lock);
  2572. mutex_unlock(&fs_info->fs_devices->device_list_mutex);
  2573. scrub_workers_put(fs_info);
  2574. return -EINPROGRESS;
  2575. }
  2576. btrfs_dev_replace_unlock(&fs_info->dev_replace);
  2577. sctx = scrub_setup_ctx(dev, is_dev_replace);
  2578. if (IS_ERR(sctx)) {
  2579. mutex_unlock(&fs_info->scrub_lock);
  2580. mutex_unlock(&fs_info->fs_devices->device_list_mutex);
  2581. scrub_workers_put(fs_info);
  2582. return PTR_ERR(sctx);
  2583. }
  2584. sctx->readonly = readonly;
  2585. dev->scrub_device = sctx;
  2586. atomic_inc(&fs_info->scrubs_running);
  2587. mutex_unlock(&fs_info->scrub_lock);
  2588. mutex_unlock(&fs_info->fs_devices->device_list_mutex);
  2589. if (!is_dev_replace) {
  2590. down_read(&fs_info->scrub_super_lock);
  2591. ret = scrub_supers(sctx, dev);
  2592. up_read(&fs_info->scrub_super_lock);
  2593. }
  2594. if (!ret)
  2595. ret = scrub_enumerate_chunks(sctx, dev, start, end,
  2596. is_dev_replace);
  2597. wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
  2598. atomic_dec(&fs_info->scrubs_running);
  2599. wake_up(&fs_info->scrub_pause_wait);
  2600. wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
  2601. if (progress)
  2602. memcpy(progress, &sctx->stat, sizeof(*progress));
  2603. mutex_lock(&fs_info->scrub_lock);
  2604. dev->scrub_device = NULL;
  2605. mutex_unlock(&fs_info->scrub_lock);
  2606. scrub_free_ctx(sctx);
  2607. scrub_workers_put(fs_info);
  2608. return ret;
  2609. }
  2610. void btrfs_scrub_pause(struct btrfs_root *root)
  2611. {
  2612. struct btrfs_fs_info *fs_info = root->fs_info;
  2613. mutex_lock(&fs_info->scrub_lock);
  2614. atomic_inc(&fs_info->scrub_pause_req);
  2615. while (atomic_read(&fs_info->scrubs_paused) !=
  2616. atomic_read(&fs_info->scrubs_running)) {
  2617. mutex_unlock(&fs_info->scrub_lock);
  2618. wait_event(fs_info->scrub_pause_wait,
  2619. atomic_read(&fs_info->scrubs_paused) ==
  2620. atomic_read(&fs_info->scrubs_running));
  2621. mutex_lock(&fs_info->scrub_lock);
  2622. }
  2623. mutex_unlock(&fs_info->scrub_lock);
  2624. }
  2625. void btrfs_scrub_continue(struct btrfs_root *root)
  2626. {
  2627. struct btrfs_fs_info *fs_info = root->fs_info;
  2628. atomic_dec(&fs_info->scrub_pause_req);
  2629. wake_up(&fs_info->scrub_pause_wait);
  2630. }
  2631. void btrfs_scrub_pause_super(struct btrfs_root *root)
  2632. {
  2633. down_write(&root->fs_info->scrub_super_lock);
  2634. }
  2635. void btrfs_scrub_continue_super(struct btrfs_root *root)
  2636. {
  2637. up_write(&root->fs_info->scrub_super_lock);
  2638. }
  2639. int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
  2640. {
  2641. mutex_lock(&fs_info->scrub_lock);
  2642. if (!atomic_read(&fs_info->scrubs_running)) {
  2643. mutex_unlock(&fs_info->scrub_lock);
  2644. return -ENOTCONN;
  2645. }
  2646. atomic_inc(&fs_info->scrub_cancel_req);
  2647. while (atomic_read(&fs_info->scrubs_running)) {
  2648. mutex_unlock(&fs_info->scrub_lock);
  2649. wait_event(fs_info->scrub_pause_wait,
  2650. atomic_read(&fs_info->scrubs_running) == 0);
  2651. mutex_lock(&fs_info->scrub_lock);
  2652. }
  2653. atomic_dec(&fs_info->scrub_cancel_req);
  2654. mutex_unlock(&fs_info->scrub_lock);
  2655. return 0;
  2656. }
  2657. int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
  2658. struct btrfs_device *dev)
  2659. {
  2660. struct scrub_ctx *sctx;
  2661. mutex_lock(&fs_info->scrub_lock);
  2662. sctx = dev->scrub_device;
  2663. if (!sctx) {
  2664. mutex_unlock(&fs_info->scrub_lock);
  2665. return -ENOTCONN;
  2666. }
  2667. atomic_inc(&sctx->cancel_req);
  2668. while (dev->scrub_device) {
  2669. mutex_unlock(&fs_info->scrub_lock);
  2670. wait_event(fs_info->scrub_pause_wait,
  2671. dev->scrub_device == NULL);
  2672. mutex_lock(&fs_info->scrub_lock);
  2673. }
  2674. mutex_unlock(&fs_info->scrub_lock);
  2675. return 0;
  2676. }
  2677. int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
  2678. struct btrfs_scrub_progress *progress)
  2679. {
  2680. struct btrfs_device *dev;
  2681. struct scrub_ctx *sctx = NULL;
  2682. mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
  2683. dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
  2684. if (dev)
  2685. sctx = dev->scrub_device;
  2686. if (sctx)
  2687. memcpy(progress, &sctx->stat, sizeof(*progress));
  2688. mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
  2689. return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
  2690. }
  2691. static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
  2692. u64 extent_logical, u64 extent_len,
  2693. u64 *extent_physical,
  2694. struct btrfs_device **extent_dev,
  2695. int *extent_mirror_num)
  2696. {
  2697. u64 mapped_length;
  2698. struct btrfs_bio *bbio = NULL;
  2699. int ret;
  2700. mapped_length = extent_len;
  2701. ret = btrfs_map_block(fs_info, READ, extent_logical,
  2702. &mapped_length, &bbio, 0);
  2703. if (ret || !bbio || mapped_length < extent_len ||
  2704. !bbio->stripes[0].dev->bdev) {
  2705. kfree(bbio);
  2706. return;
  2707. }
  2708. *extent_physical = bbio->stripes[0].physical;
  2709. *extent_mirror_num = bbio->mirror_num;
  2710. *extent_dev = bbio->stripes[0].dev;
  2711. kfree(bbio);
  2712. }
  2713. static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
  2714. struct scrub_wr_ctx *wr_ctx,
  2715. struct btrfs_fs_info *fs_info,
  2716. struct btrfs_device *dev,
  2717. int is_dev_replace)
  2718. {
  2719. WARN_ON(wr_ctx->wr_curr_bio != NULL);
  2720. mutex_init(&wr_ctx->wr_lock);
  2721. wr_ctx->wr_curr_bio = NULL;
  2722. if (!is_dev_replace)
  2723. return 0;
  2724. WARN_ON(!dev->bdev);
  2725. wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
  2726. bio_get_nr_vecs(dev->bdev));
  2727. wr_ctx->tgtdev = dev;
  2728. atomic_set(&wr_ctx->flush_all_writes, 0);
  2729. return 0;
  2730. }
  2731. static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
  2732. {
  2733. mutex_lock(&wr_ctx->wr_lock);
  2734. kfree(wr_ctx->wr_curr_bio);
  2735. wr_ctx->wr_curr_bio = NULL;
  2736. mutex_unlock(&wr_ctx->wr_lock);
  2737. }
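/*
 * Dev-replace path for data without checksums: the affected range is
 * resolved back to the owning inodes and the pages are read through the
 * inode's page cache in a worker (copy_nocow_pages_worker) so they can be
 * written to the replace target.
 */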
  2738. static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
  2739. int mirror_num, u64 physical_for_dev_replace)
  2740. {
  2741. struct scrub_copy_nocow_ctx *nocow_ctx;
  2742. struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
  2743. nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
  2744. if (!nocow_ctx) {
  2745. spin_lock(&sctx->stat_lock);
  2746. sctx->stat.malloc_errors++;
  2747. spin_unlock(&sctx->stat_lock);
  2748. return -ENOMEM;
  2749. }
  2750. scrub_pending_trans_workers_inc(sctx);
  2751. nocow_ctx->sctx = sctx;
  2752. nocow_ctx->logical = logical;
  2753. nocow_ctx->len = len;
  2754. nocow_ctx->mirror_num = mirror_num;
  2755. nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
  2756. nocow_ctx->work.func = copy_nocow_pages_worker;
  2757. btrfs_queue_worker(&fs_info->scrub_nocow_workers,
  2758. &nocow_ctx->work);
  2759. return 0;
  2760. }
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
        struct scrub_copy_nocow_ctx *nocow_ctx =
                container_of(work, struct scrub_copy_nocow_ctx, work);
        struct scrub_ctx *sctx = nocow_ctx->sctx;
        u64 logical = nocow_ctx->logical;
        u64 len = nocow_ctx->len;
        int mirror_num = nocow_ctx->mirror_num;
        u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
        int ret;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        struct btrfs_root *root;
        int not_written = 0;

        fs_info = sctx->dev_root->fs_info;
        root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                spin_unlock(&sctx->stat_lock);
                not_written = 1;
                goto out;
        }

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                not_written = 1;
                goto out;
        }

        ret = iterate_inodes_from_logical(logical, fs_info, path,
                                          copy_nocow_pages_for_inode,
                                          nocow_ctx);
        if (ret != 0 && ret != -ENOENT) {
                pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %llu, ret %d\n",
                        (unsigned long long)logical,
                        (unsigned long long)physical_for_dev_replace,
                        (unsigned long long)len,
                        (unsigned long long)mirror_num, ret);
                not_written = 1;
                goto out;
        }
out:
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, root);
        if (not_written)
                btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
                                            num_uncorrectable_read_errors);

        btrfs_free_path(path);
        kfree(nocow_ctx);

        scrub_pending_trans_workers_dec(sctx);
}

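/*
 * Called for each (inode, offset, root) that references the extent being
 * copied: read the extent's pages through the page cache and write them to
 * the corresponding physical location on the replace target device.
 */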
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
        struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
        struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
        struct btrfs_key key;
        struct inode *inode;
        struct page *page;
        struct btrfs_root *local_root;
        u64 physical_for_dev_replace;
        u64 len;
        unsigned long index;
        int srcu_index;
        int ret;
        int err;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(local_root)) {
                srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
        }

        if (btrfs_root_refs(&local_root->root_item) == 0) {
                srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return -ENOENT;
        }

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
        srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        /* Avoid truncate/dio/punch hole.. */
        mutex_lock(&inode->i_mutex);
        inode_dio_wait(inode);

        ret = 0;
        physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
        len = nocow_ctx->len;
        while (len >= PAGE_CACHE_SIZE) {
                index = offset >> PAGE_CACHE_SHIFT;
again:
                page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
                if (!page) {
                        pr_err("find_or_create_page() failed\n");
                        ret = -ENOMEM;
                        goto out;
                }

                if (PageUptodate(page)) {
                        if (PageDirty(page))
                                goto next_page;
                } else {
                        ClearPageError(page);
                        err = extent_read_full_page(&BTRFS_I(inode)->io_tree,
                                                    page, btrfs_get_extent,
                                                    nocow_ctx->mirror_num);
                        if (err) {
                                ret = err;
                                goto next_page;
                        }

                        lock_page(page);
                        /*
                         * If the page has been removed from the page cache,
                         * the data on it is meaningless, because it may be
                         * an old copy; the new data may already have been
                         * written into a new page in the page cache.
                         */
                        if (page->mapping != inode->i_mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                goto again;
                        }
                        if (!PageUptodate(page)) {
                                ret = -EIO;
                                goto next_page;
                        }
                }
                err = write_page_nocow(nocow_ctx->sctx,
                                       physical_for_dev_replace, page);
                if (err)
                        ret = err;
next_page:
                unlock_page(page);
                page_cache_release(page);

                if (ret)
                        break;

                offset += PAGE_CACHE_SIZE;
                physical_for_dev_replace += PAGE_CACHE_SIZE;
                len -= PAGE_CACHE_SIZE;
        }
out:
        mutex_unlock(&inode->i_mutex);
        iput(inode);
        return ret;
}

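/*
 * Synchronously write one page to the given physical offset on the
 * dev-replace target device.
 */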
static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page)
{
        struct bio *bio;
        struct btrfs_device *dev;
        int ret;
        DECLARE_COMPLETION_ONSTACK(compl);

        dev = sctx->wr_ctx.tgtdev;
        if (!dev)
                return -EIO;
        if (!dev->bdev) {
                printk_ratelimited(KERN_WARNING
                        "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
                return -EIO;
        }
        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                spin_unlock(&sctx->stat_lock);
                return -ENOMEM;
        }
        bio->bi_private = &compl;
        bio->bi_end_io = scrub_complete_bio_end_io;
        bio->bi_size = 0;
        bio->bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
        ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
        if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
                bio_put(bio);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
                return -EIO;
        }
        btrfsic_submit_bio(WRITE_SYNC, bio);
        wait_for_completion(&compl);

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                goto leave_with_eio;

        bio_put(bio);
        return 0;
}