/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
struct scrub_block;
struct scrub_dev;

#define SCRUB_PAGES_PER_BIO		16	/* 64k per bio */
#define SCRUB_BIOS_PER_DEV		16	/* 1 MB per device in flight */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
        struct block_device     *bdev;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
        u64                     physical;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
                unsigned int    io_error:1;
        };
        u8                      csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
        int                     index;
        struct scrub_dev        *sdev;
        struct bio              *bio;
        int                     err;
        u64                     logical;
        u64                     physical;
        struct scrub_page       *pagev[SCRUB_PAGES_PER_BIO];
        int                     page_count;
        int                     next_free;
        struct btrfs_work       work;
};

struct scrub_block {
        struct scrub_page       pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
        atomic_t                ref_count; /* free mem on transition to zero */
        struct scrub_dev        *sdev;
        struct {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
        };
};

struct scrub_dev {
        struct scrub_bio        *bios[SCRUB_BIOS_PER_DEV];
        struct btrfs_device     *dev;
        int                     first_free;
        int                     curr;
        atomic_t                in_flight;
        atomic_t                fixup_cnt;
        spinlock_t              list_lock;
        wait_queue_head_t       list_wait;
        u16                     csum_size;
        struct list_head        csum_list;
        atomic_t                cancel_req;
        int                     readonly;
        int                     pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
        u32                     sectorsize;
        u32                     nodesize;
        u32                     leafsize;
        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;
};

struct scrub_fixup_nodatasum {
        struct scrub_dev        *sdev;
        u64                     logical;
        struct btrfs_root       *root;
        struct btrfs_work       work;
        int                     mirror_num;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        char                    *scratch_buf;
        char                    *msg_buf;
        const char              *errstr;
        sector_t                sector;
        u64                     logical;
        struct btrfs_device     *dev;
        int                     msg_bufsize;
        int                     scratch_bufsize;
};

static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                                     struct btrfs_mapping_tree *map_tree,
                                     u64 length, u64 logical,
                                     struct scrub_block *sblock);
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                               struct scrub_block *sblock, int is_metadata,
                               int have_csum, u8 *csum, u64 generation,
                               u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         struct scrub_block *sblock,
                                         int is_metadata, int have_csum,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good,
                                             int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
                                 struct scrub_page *spage);
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                       u64 physical, u64 flags, u64 gen, int mirror_num,
                       u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
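
/* release all checksums that are still queued on the per-device csum list */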
static void scrub_free_csums(struct scrub_dev *sdev)
{
        while (!list_empty(&sdev->csum_list)) {
                struct btrfs_ordered_sum *sum;
                sum = list_first_entry(&sdev->csum_list,
                                       struct btrfs_ordered_sum, list);
                list_del(&sum->list);
                kfree(sum);
        }
}

static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
        int i;

        if (!sdev)
                return;

        /* this can happen when scrub is cancelled */
        if (sdev->curr != -1) {
                struct scrub_bio *sbio = sdev->bios[sdev->curr];

                for (i = 0; i < sbio->page_count; i++) {
                        BUG_ON(!sbio->pagev[i]);
                        BUG_ON(!sbio->pagev[i]->page);
                        scrub_block_put(sbio->pagev[i]->sblock);
                }
                bio_put(sbio->bio);
        }

        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio = sdev->bios[i];

                if (!sbio)
                        break;
                kfree(sbio);
        }

        scrub_free_csums(sdev);
        kfree(sdev);
}
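
/*
 * allocate and initialize the per-device scrub context: a pool of
 * SCRUB_BIOS_PER_DEV scrub_bios chained into a free list via next_free,
 * plus the counters, locks and wait queue used while the scrub runs
 */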
static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
        struct scrub_dev *sdev;
        int i;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int pages_per_bio;

        pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
                              bio_get_nr_vecs(dev->bdev));
        sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
        if (!sdev)
                goto nomem;
        sdev->dev = dev;
        sdev->pages_per_bio = pages_per_bio;
        sdev->curr = -1;
        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio;

                sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
                if (!sbio)
                        goto nomem;
                sdev->bios[i] = sbio;

                sbio->index = i;
                sbio->sdev = sdev;
                sbio->page_count = 0;
                sbio->work.func = scrub_bio_end_io_worker;

                if (i != SCRUB_BIOS_PER_DEV-1)
                        sdev->bios[i]->next_free = i + 1;
                else
                        sdev->bios[i]->next_free = -1;
        }
        sdev->first_free = 0;
        sdev->nodesize = dev->dev_root->nodesize;
        sdev->leafsize = dev->dev_root->leafsize;
        sdev->sectorsize = dev->dev_root->sectorsize;
        atomic_set(&sdev->in_flight, 0);
        atomic_set(&sdev->fixup_cnt, 0);
        atomic_set(&sdev->cancel_req, 0);
        sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sdev->csum_list);

        spin_lock_init(&sdev->list_lock);
        spin_lock_init(&sdev->stat_lock);
        init_waitqueue_head(&sdev->list_wait);
        return sdev;

nomem:
        scrub_free_dev(sdev);
        return ERR_PTR(-ENOMEM);
}
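
/*
 * callback for iterate_extent_inodes: resolve one (inode, offset, root)
 * triple that references the errored extent to file paths and print a
 * warning line for each path found
 */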
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
        u64 isize;
        u32 nlink;
        int ret;
        int i;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;

        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        ret = inode_item_info(inum, 0, local_root, swarn->path);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                    struct btrfs_inode_item);
        isize = btrfs_inode_size(eb, inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        ipath = init_ipath(4096, local_root, swarn->path);
        if (IS_ERR(ipath)) {
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * we deliberately ignore the fact that ipath might have been too
         * small to hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
                        "%s, sector %llu, root %llu, inode %llu, offset %llu, "
                        "length %llu, links %u (path: %s)\n", swarn->errstr,
                        swarn->logical, swarn->dev->name,
                        (unsigned long long)swarn->sector, root, inum, offset,
                        min(isize - offset, (u64)PAGE_SIZE), nlink,
                        (char *)(unsigned long)ipath->fspath->val[i]);

        free_ipath(ipath);
        return 0;

err:
        printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
                "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
                "resolving failed with ret=%d\n", swarn->errstr,
                swarn->logical, swarn->dev->name,
                (unsigned long long)swarn->sector, root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}
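
/*
 * print a warning for the errored block: for metadata, report which tree
 * and level reference it; for data, walk the backrefs and report every
 * affected file path
 */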
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
        struct btrfs_device *dev = sblock->sdev->dev;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        u32 item_size;
        int ret;
        u64 ref_root;
        u8 ref_level;
        unsigned long ptr = 0;
        const int bufsize = 4096;
        u64 extent_item_pos;

        path = btrfs_alloc_path();

        swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
        swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
        BUG_ON(sblock->page_count < 1);
        swarn.sector = (sblock->pagev[0].physical) >> 9;
        swarn.logical = sblock->pagev[0].logical;
        swarn.errstr = errstr;
        swarn.dev = dev;
        swarn.msg_bufsize = bufsize;
        swarn.scratch_bufsize = bufsize;

        if (!path || !swarn.scratch_buf || !swarn.msg_buf)
                goto out;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
        if (ret < 0)
                goto out;

        extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
        btrfs_release_path(path);

        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
                        ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
                                                      &ref_root, &ref_level);
                        printk(KERN_WARNING
                                "btrfs: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
                                "%llu\n", errstr, swarn.logical, dev->name,
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
        } else {
                swarn.path = path;
                iterate_extent_inodes(fs_info, found_key.objectid,
                                      extent_item_pos, 1,
                                      scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
        kfree(swarn.scratch_buf);
        kfree(swarn.msg_buf);
}
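
/*
 * callback for iterate_inodes_from_logical: force a read of the page that
 * covers the bad sector through the page cache. If the page is already up
 * to date, the good in-memory copy is written to the defect sector
 * directly; otherwise the read of the bad mirror triggers the generic
 * read-repair path.
 */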
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
        struct page *page = NULL;
        unsigned long index;
        struct scrub_fixup_nodatasum *fixup = ctx;
        int ret;
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
        if (IS_ERR(local_root))
                return PTR_ERR(local_root);

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        index = offset >> PAGE_CACHE_SHIFT;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page)) {
                struct btrfs_mapping_tree *map_tree;
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defect sector. the
                         * data that was in that sector is not in memory,
                         * because the page was modified. we must not write the
                         * modified page to that sector.
                         *
                         * TODO: what could be done here: wait for the delalloc
                         *       runner to write out that page (might involve
                         *       COW) and see whether the sector is still
                         *       referenced afterwards.
                         *
                         * For the time being, we'll treat this error as
                         * uncorrectable, although there is a chance that a
                         * later scrub will find the bad sector again and that
                         * there's no dirty page in memory then.
                         */
                        ret = -EIO;
                        goto out;
                }
                map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
                ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
                                        fixup->logical, page,
                                        fixup->mirror_num);
                unlock_page(page);
                corrected = !ret;
        } else {
                /*
                 * we need to get good data first. the general readpage path
                 * will call repair_io_failure for us, we just have to make
                 * sure we read the bad mirror.
                 */
                ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                      EXTENT_DAMAGED, GFP_NOFS);
                if (ret) {
                        /* set_extent_bits should give proper error */
                        WARN_ON(ret > 0);
                        if (ret > 0)
                                ret = -EFAULT;
                        goto out;
                }

                ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
                                            btrfs_get_extent,
                                            fixup->mirror_num);
                wait_on_page_locked(page);

                corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
                                            end, EXTENT_DAMAGED, 0, NULL);
                if (!corrected)
                        clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                          EXTENT_DAMAGED, GFP_NOFS);
        }

out:
        if (page)
                put_page(page);
        if (inode)
                iput(inode);

        if (ret < 0)
                return ret;

        if (ret == 0 && corrected) {
                /*
                 * we only need to call readpage for one of the inodes
                 * belonging to this extent. so make iterate_extent_inodes stop
                 */
                return 1;
        }

        return -EIO;
}
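
/*
 * worker that tries to repair an errored block that carries no checksum
 * (nodatasum data): iterate over all inodes referencing the logical
 * address and let scrub_fixup_readpage re-read the bad mirror through
 * the regular read path, which rewrites the sector from a good copy
 */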
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
        int ret;
        struct scrub_fixup_nodatasum *fixup;
        struct scrub_dev *sdev;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        int uncorrectable = 0;

        fixup = container_of(work, struct scrub_fixup_nodatasum, work);
        sdev = fixup->sdev;
        fs_info = fixup->root->fs_info;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.malloc_errors;
                spin_unlock(&sdev->stat_lock);
                uncorrectable = 1;
                goto out;
        }

        trans = btrfs_join_transaction(fixup->root);
        if (IS_ERR(trans)) {
                uncorrectable = 1;
                goto out;
        }

        /*
         * the idea is to trigger a regular read through the standard path. we
         * read a page from the (failed) logical address by specifying the
         * corresponding copynum of the failed sector. thus, that readpage is
         * expected to fail.
         * that is the point where on-the-fly error correction will kick in
         * (once it's finished) and rewrite the failed sector if a good copy
         * can be found.
         */
        ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
                                          path, scrub_fixup_readpage,
                                          fixup);
        if (ret < 0) {
                uncorrectable = 1;
                goto out;
        }
        WARN_ON(ret != 1);

        spin_lock(&sdev->stat_lock);
        ++sdev->stat.corrected_errors;
        spin_unlock(&sdev->stat_lock);

out:
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, fixup->root);
        if (uncorrectable) {
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.uncorrectable_errors;
                spin_unlock(&sdev->stat_lock);
                printk_ratelimited(KERN_ERR
                        "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
                        (unsigned long long)fixup->logical, sdev->dev->name);
        }

        btrfs_free_path(path);
        kfree(fixup);

        /* see the caller for why we pretend to be paused in the scrub counters */
        mutex_lock(&fs_info->scrub_lock);
        atomic_dec(&fs_info->scrubs_running);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);
        atomic_dec(&sdev->fixup_cnt);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sdev->list_wait);
}
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
        struct scrub_dev *sdev = sblock_to_check->sdev;
        struct btrfs_fs_info *fs_info;
        u64 length;
        u64 logical;
        u64 generation;
        unsigned int failed_mirror_index;
        unsigned int is_metadata;
        unsigned int have_csum;
        u8 *csum;
        struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
        struct scrub_block *sblock_bad;
        int ret;
        int mirror_index;
        int page_num;
        int success;
        static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        BUG_ON(sblock_to_check->page_count < 1);
        fs_info = sdev->dev->dev_root->fs_info;
        length = sblock_to_check->page_count * PAGE_SIZE;
        logical = sblock_to_check->pagev[0].logical;
        generation = sblock_to_check->pagev[0].generation;
        BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
        failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
        is_metadata = !(sblock_to_check->pagev[0].flags &
                        BTRFS_EXTENT_FLAG_DATA);
        have_csum = sblock_to_check->pagev[0].have_csum;
        csum = sblock_to_check->pagev[0].csum;

        /*
         * read all mirrors one after the other. This includes re-reading
         * the extent or metadata block that failed (that was the cause
         * that this fixup code is called) one more time, page by page
         * this time in order to know which pages
         * caused I/O errors and which ones are good (for all mirrors).
         * It is the goal to handle the situation when more than one
         * mirror contains I/O errors, but the errors do not
         * overlap, i.e. the data can be repaired by selecting the
         * pages from those mirrors without I/O error on the
         * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
         * would be that mirror #1 has an I/O error on the first page,
         * the second page is good, and mirror #2 has an I/O error on
         * the second page, but the first page is good.
         * Then the first page of the first mirror can be repaired by
         * taking the first page of the second mirror, and the
         * second page of the second mirror can be repaired by
         * copying the contents of the 2nd page of the 1st mirror.
         * One more note: if the pages of one mirror contain I/O
         * errors, the checksum cannot be verified. In order to get
         * the best data for repairing, the first attempt is to find
         * a mirror without I/O errors and with a validated checksum.
         * Only if this is not possible, the pages are picked from
         * mirrors with I/O errors without considering the checksum.
         * If the latter is the case, at the end, the checksum of the
         * repaired area is verified in order to correctly maintain
         * the statistics.
         */
        sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
                                      sizeof(*sblocks_for_recheck),
                                      GFP_NOFS);
        if (!sblocks_for_recheck) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.malloc_errors++;
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                goto out;
        }

        /* setup the context, map the logical blocks and alloc the pages */
        ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
                                        logical, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
        sblock_bad = sblocks_for_recheck + failed_mirror_index;

        /* build and submit the bios for the failed mirror, check checksums */
        ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
                                  csum, generation, sdev->csum_size);
        if (ret) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                goto out;
        }

        if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
            sblock_bad->no_io_error_seen) {
                /*
                 * the error disappeared after reading page by page, or
                 * the area was part of a huge bio and other parts of the
                 * bio caused I/O errors, or the block layer merged several
                 * read requests into one and the error is caused by a
                 * different bio (usually one of the two latter cases is
                 * the cause)
                 */
                spin_lock(&sdev->stat_lock);
                sdev->stat.unverified_errors++;
                spin_unlock(&sdev->stat_lock);

                goto out;
        }

        if (!sblock_bad->no_io_error_seen) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.csum_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
        } else if (sblock_bad->header_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.verify_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
        }

        if (sdev->readonly)
                goto did_not_correct_error;

        if (!is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;

                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COW'ed, that it might be modified
                 * concurrently. The general strategy to work on the
                 * commit root does not help in the case when COW is not
                 * used.
                 */
                fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
                if (!fixup_nodatasum)
                        goto did_not_correct_error;
                fixup_nodatasum->sdev = sdev;
                fixup_nodatasum->logical = logical;
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                /*
                 * increment scrubs_running to prevent cancel requests from
                 * completing as long as a fixup worker is running. we must also
                 * increment scrubs_paused to prevent deadlocking on pause
                 * requests used for transaction commits (as the worker uses a
                 * transaction context). it is safe to regard the fixup worker
                 * as paused for all practical matters. effectively, we only
                 * avoid cancellation requests from completing.
                 */
                mutex_lock(&fs_info->scrub_lock);
                atomic_inc(&fs_info->scrubs_running);
                atomic_inc(&fs_info->scrubs_paused);
                mutex_unlock(&fs_info->scrub_lock);
                atomic_inc(&sdev->fixup_cnt);
                fixup_nodatasum->work.func = scrub_fixup_nodatasum;
                btrfs_queue_worker(&fs_info->scrub_workers,
                                   &fixup_nodatasum->work);
                goto out;
        }

        /*
         * now build and submit the bios for the other mirrors, check
         * checksums
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                if (mirror_index == failed_mirror_index)
                        continue;

                /* build and submit the bios, check checksums */
                ret = scrub_recheck_block(fs_info,
                                          sblocks_for_recheck + mirror_index,
                                          is_metadata, have_csum, csum,
                                          generation, sdev->csum_size);
                if (ret)
                        goto did_not_correct_error;
        }

        /*
         * first try to pick the mirror which is completely without I/O
         * errors and also does not have a checksum error.
         * If one is found, and if a checksum is present, the full block
         * that is known to contain an error is rewritten. Afterwards
         * the block is known to be corrected.
         * If a mirror is found which is completely correct, and no
         * checksum is present, only those pages are rewritten that had
         * an I/O error in the block to be repaired, since it cannot be
         * determined which copy of the other pages is better (and it
         * could happen otherwise that a correct page would be
         * overwritten by a bad one).
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                struct scrub_block *sblock_other = sblocks_for_recheck +
                                                   mirror_index;

                if (!sblock_other->header_error &&
                    !sblock_other->checksum_error &&
                    sblock_other->no_io_error_seen) {
                        int force_write = is_metadata || have_csum;

                        ret = scrub_repair_block_from_good_copy(sblock_bad,
                                                                sblock_other,
                                                                force_write);
                        if (0 == ret)
                                goto corrected_error;
                }
        }

        /*
         * in case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
         * the area to fix. Afterwards verify the checksum of the block
         * that is supposed to be repaired. This verification step is
         * only done for the purpose of statistic counting and for the
         * final scrub report on whether errors remain.
         * A perfect algorithm could make use of the checksum and try
         * all possible combinations of pages from the different mirrors
         * until the checksum verification succeeds. For example, when
         * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
         * of mirror #2 is readable but the final checksum test fails,
         * then the 2nd page of mirror #3 could be tried, whether now
         * the final checksum succeeds. But this would be a rare
         * exception and is therefore not implemented. At least it is
         * avoided that the good copy is overwritten.
         * A more useful improvement would be to pick the sectors
         * without I/O error based on sector sizes (512 bytes on legacy
         * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
         * mirror could be repaired by taking 512 byte of a different
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */
        /* can only fix I/O errors from here on */
        if (sblock_bad->no_io_error_seen)
                goto did_not_correct_error;

        success = 1;
        for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev + page_num;

                if (!page_bad->io_error)
                        continue;

                for (mirror_index = 0;
                     mirror_index < BTRFS_MAX_MIRRORS &&
                     sblocks_for_recheck[mirror_index].page_count > 0;
                     mirror_index++) {
                        struct scrub_block *sblock_other = sblocks_for_recheck +
                                                           mirror_index;
                        struct scrub_page *page_other = sblock_other->pagev +
                                                        page_num;

                        if (!page_other->io_error) {
                                ret = scrub_repair_page_from_good_copy(
                                        sblock_bad, sblock_other, page_num, 0);
                                if (0 == ret) {
                                        page_bad->io_error = 0;
                                        break; /* succeeded for this page */
                                }
                        }
                }

                if (page_bad->io_error) {
                        /* did not find a mirror to copy the page from */
                        success = 0;
                }
        }

        if (success) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
                         * sectors on disk are repaired (the write
                         * request for data to be repaired is on its way).
                         * Just be lazy and use scrub_recheck_block()
                         * which re-reads the data before the checksum
                         * is verified, but most likely the data comes out
                         * of the page cache.
                         */
                        ret = scrub_recheck_block(fs_info, sblock_bad,
                                                  is_metadata, have_csum, csum,
                                                  generation, sdev->csum_size);
                        if (!ret && !sblock_bad->header_error &&
                            !sblock_bad->checksum_error &&
                            sblock_bad->no_io_error_seen)
                                goto corrected_error;
                        else
                                goto did_not_correct_error;
                } else {
corrected_error:
                        spin_lock(&sdev->stat_lock);
                        sdev->stat.corrected_errors++;
                        spin_unlock(&sdev->stat_lock);
                        printk_ratelimited(KERN_ERR
                                "btrfs: fixed up error at logical %llu on dev %s\n",
                                (unsigned long long)logical, sdev->dev->name);
                }
        } else {
did_not_correct_error:
                spin_lock(&sdev->stat_lock);
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                printk_ratelimited(KERN_ERR
                        "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
                        (unsigned long long)logical, sdev->dev->name);
        }

out:
        if (sblocks_for_recheck) {
                for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
                     mirror_index++) {
                        struct scrub_block *sblock = sblocks_for_recheck +
                                                     mirror_index;
                        int page_index;

                        for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
                             page_index++)
                                if (sblock->pagev[page_index].page)
                                        __free_page(
                                                sblock->pagev[page_index].page);
                }
                kfree(sblocks_for_recheck);
        }

        return 0;
}
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                                     struct btrfs_mapping_tree *map_tree,
                                     u64 length, u64 logical,
                                     struct scrub_block *sblocks_for_recheck)
{
        int page_index;
        int mirror_index;
        int ret;

        /*
         * note: the three members sdev, ref_count and outstanding_pages
         * are not used (and not set) in the blocks that are used for
         * the recheck procedure
         */

        page_index = 0;
        while (length > 0) {
                u64 sublen = min_t(u64, length, PAGE_SIZE);
                u64 mapped_length = sublen;
                struct btrfs_bio *bbio = NULL;

                /*
                 * with a length of PAGE_SIZE, each returned stripe
                 * represents one mirror
                 */
                ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
                                      &bbio, 0);
                if (ret || !bbio || mapped_length < sublen) {
                        kfree(bbio);
                        return -EIO;
                }

                BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
                for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
                     mirror_index++) {
                        struct scrub_block *sblock;
                        struct scrub_page *page;

                        if (mirror_index >= BTRFS_MAX_MIRRORS)
                                continue;
                        sblock = sblocks_for_recheck + mirror_index;
                        page = sblock->pagev + page_index;
                        page->logical = logical;
                        page->physical = bbio->stripes[mirror_index].physical;
                        page->bdev = bbio->stripes[mirror_index].dev->bdev;
                        page->mirror_num = mirror_index + 1;
                        page->page = alloc_page(GFP_NOFS);
                        if (!page->page) {
                                spin_lock(&sdev->stat_lock);
                                sdev->stat.malloc_errors++;
                                spin_unlock(&sdev->stat_lock);
                                return -ENOMEM;
                        }
                        sblock->page_count++;
                }
                kfree(bbio);
                length -= sublen;
                logical += sublen;
                page_index++;
        }

        return 0;
}
/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                               struct scrub_block *sblock, int is_metadata,
                               int have_csum, u8 *csum, u64 generation,
                               u16 csum_size)
{
        int page_num;

        sblock->no_io_error_seen = 1;
        sblock->header_error = 0;
        sblock->checksum_error = 0;

        for (page_num = 0; page_num < sblock->page_count; page_num++) {
                struct bio *bio;
                int ret;
                struct scrub_page *page = sblock->pagev + page_num;
                DECLARE_COMPLETION_ONSTACK(complete);

                BUG_ON(!page->page);
                bio = bio_alloc(GFP_NOFS, 1);
                bio->bi_bdev = page->bdev;
                bio->bi_sector = page->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;

                ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
                        bio_put(bio);
                        return -EIO;
                }
                btrfsic_submit_bio(READ, bio);

                /* this will also unplug the queue */
                wait_for_completion(&complete);

                page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
                if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        sblock->no_io_error_seen = 0;
                bio_put(bio);
        }

        if (sblock->no_io_error_seen)
                scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
                                             have_csum, csum, generation,
                                             csum_size);

        return 0;
}
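
/*
 * recompute the checksum of a block that was read without I/O errors.
 * For metadata, the header fields (bytenr, generation, fsid, chunk tree
 * uuid) are verified as well, and the csum embedded in the header is
 * used as the expected value.
 */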
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         struct scrub_block *sblock,
                                         int is_metadata, int have_csum,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size)
{
        int page_num;
        u8 calculated_csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
        struct btrfs_root *root = fs_info->extent_root;
        void *mapped_buffer;

        BUG_ON(!sblock->pagev[0].page);
        if (is_metadata) {
                struct btrfs_header *h;

                mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
                h = (struct btrfs_header *)mapped_buffer;

                if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
                    generation != le64_to_cpu(h->generation) ||
                    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
                    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
                           BTRFS_UUID_SIZE))
                        sblock->header_error = 1;
                csum = h->csum;
        } else {
                if (!have_csum)
                        return;

                mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
        }

        for (page_num = 0;;) {
                if (page_num == 0 && is_metadata)
                        crc = btrfs_csum_data(root,
                                ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
                                crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
                else
                        crc = btrfs_csum_data(root, mapped_buffer, crc,
                                              PAGE_SIZE);

                kunmap_atomic(mapped_buffer, KM_USER0);
                page_num++;
                if (page_num >= sblock->page_count)
                        break;
                BUG_ON(!sblock->pagev[page_num].page);

                mapped_buffer = kmap_atomic(sblock->pagev[page_num].page,
                                            KM_USER0);
        }

        btrfs_csum_final(crc, calculated_csum);
        if (memcmp(calculated_csum, csum, csum_size))
                sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}
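
/*
 * rewrite every page of the bad block from the corresponding page of the
 * good block; without force_write, only pages that actually show an
 * error are rewritten
 */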
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good,
                                             int force_write)
{
        int page_num;
        int ret = 0;

        for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
                int ret_sub;

                ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
                                                           sblock_good,
                                                           page_num,
                                                           force_write);
                if (ret_sub)
                        ret = ret_sub;
        }

        return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write)
{
        struct scrub_page *page_bad = sblock_bad->pagev + page_num;
        struct scrub_page *page_good = sblock_good->pagev + page_num;

        BUG_ON(sblock_bad->pagev[page_num].page == NULL);
        BUG_ON(sblock_good->pagev[page_num].page == NULL);
        if (force_write || sblock_bad->header_error ||
            sblock_bad->checksum_error || page_bad->io_error) {
                struct bio *bio;
                int ret;
                DECLARE_COMPLETION_ONSTACK(complete);

                bio = bio_alloc(GFP_NOFS, 1);
                bio->bi_bdev = page_bad->bdev;
                bio->bi_sector = page_bad->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;

                ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
                        bio_put(bio);
                        return -EIO;
                }
                btrfsic_submit_bio(WRITE, bio);

                /* this will also unplug the queue */
                wait_for_completion(&complete);
                bio_put(bio);
        }

        return 0;
}
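
/*
 * verify a completed block according to its type (data, tree block or
 * super block) and kick off repair if the check fails; super block
 * errors are only counted, never repaired here
 */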
static void scrub_checksum(struct scrub_block *sblock)
{
        u64 flags;
        int ret;

        BUG_ON(sblock->page_count < 1);
        flags = sblock->pagev[0].flags;
        ret = 0;
        if (flags & BTRFS_EXTENT_FLAG_DATA)
                ret = scrub_checksum_data(sblock);
        else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                ret = scrub_checksum_tree_block(sblock);
        else if (flags & BTRFS_EXTENT_FLAG_SUPER)
                (void)scrub_checksum_super(sblock);
        else
                WARN_ON(1);
        if (ret)
                scrub_handle_errored_block(sblock);
}
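
/*
 * verify one data sector against the checksum that was looked up in
 * advance and stored in the scrub_page; returns 1 and bumps the csum
 * error counter on mismatch
 */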
static int scrub_checksum_data(struct scrub_block *sblock)
{
        struct scrub_dev *sdev = sblock->sdev;
        u8 csum[BTRFS_CSUM_SIZE];
        u8 *on_disk_csum;
        struct page *page;
        void *buffer;
        u32 crc = ~(u32)0;
        int fail = 0;
        struct btrfs_root *root = sdev->dev->dev_root;
        u64 len;
        int index;

        BUG_ON(sblock->page_count < 1);
        if (!sblock->pagev[0].have_csum)
                return 0;

        on_disk_csum = sblock->pagev[0].csum;
        page = sblock->pagev[0].page;
        buffer = kmap_atomic(page, KM_USER0);

        len = sdev->sectorsize;
        index = 0;
        for (;;) {
                u64 l = min_t(u64, len, PAGE_SIZE);

                crc = btrfs_csum_data(root, buffer, crc, l);
                kunmap_atomic(buffer, KM_USER0);
                len -= l;
                if (len == 0)
                        break;
                index++;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
                buffer = kmap_atomic(page, KM_USER0);
        }

        btrfs_csum_final(crc, csum);
        if (memcmp(csum, on_disk_csum, sdev->csum_size))
                fail = 1;

        if (fail) {
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.csum_errors;
                spin_unlock(&sdev->stat_lock);
        }

        return fail;
}
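
/*
 * verify a tree block: compare bytenr, generation, fsid and chunk tree
 * uuid from the header against the expected values, then checksum the
 * whole node. Header mismatches count as verify errors, checksum
 * mismatches as csum errors.
 */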
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
        struct scrub_dev *sdev = sblock->sdev;
        struct btrfs_header *h;
        struct btrfs_root *root = sdev->dev->dev_root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u8 calculated_csum[BTRFS_CSUM_SIZE];
        u8 on_disk_csum[BTRFS_CSUM_SIZE];
        struct page *page;
        void *mapped_buffer;
        u64 mapped_size;
        void *p;
        u32 crc = ~(u32)0;
        int fail = 0;
        int crc_fail = 0;
        u64 len;
        int index;

        BUG_ON(sblock->page_count < 1);
        page = sblock->pagev[0].page;
        mapped_buffer = kmap_atomic(page, KM_USER0);
        h = (struct btrfs_header *)mapped_buffer;
        memcpy(on_disk_csum, h->csum, sdev->csum_size);

        /*
         * we don't use the getter functions here, as we
         * a) don't have an extent buffer and
         * b) the page is already kmapped
         */

        if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
                ++fail;

        if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
                ++fail;

        if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
                ++fail;

        if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
                   BTRFS_UUID_SIZE))
                ++fail;

        BUG_ON(sdev->nodesize != sdev->leafsize);
        len = sdev->nodesize - BTRFS_CSUM_SIZE;
        mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
        p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
        index = 0;
        for (;;) {
                u64 l = min_t(u64, len, mapped_size);

                crc = btrfs_csum_data(root, p, crc, l);
                kunmap_atomic(mapped_buffer, KM_USER0);
                len -= l;
                if (len == 0)
                        break;
                index++;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
                mapped_buffer = kmap_atomic(page, KM_USER0);
                mapped_size = PAGE_SIZE;
                p = mapped_buffer;
        }

        btrfs_csum_final(crc, calculated_csum);
        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
                ++crc_fail;

        if (crc_fail || fail) {
                spin_lock(&sdev->stat_lock);
                if (crc_fail)
                        ++sdev->stat.csum_errors;
                if (fail)
                        ++sdev->stat.verify_errors;
                spin_unlock(&sdev->stat_lock);
        }

        return fail || crc_fail;
}
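
/*
 * verify a super block copy: check bytenr, generation and fsid from the
 * super block itself, then checksum BTRFS_SUPER_INFO_SIZE bytes
 */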
static int scrub_checksum_super(struct scrub_block *sblock)
{
        struct btrfs_super_block *s;
        struct scrub_dev *sdev = sblock->sdev;
        struct btrfs_root *root = sdev->dev->dev_root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u8 calculated_csum[BTRFS_CSUM_SIZE];
        u8 on_disk_csum[BTRFS_CSUM_SIZE];
        struct page *page;
        void *mapped_buffer;
        u64 mapped_size;
        void *p;
        u32 crc = ~(u32)0;
        int fail = 0;
        u64 len;
        int index;

        BUG_ON(sblock->page_count < 1);
        page = sblock->pagev[0].page;
        mapped_buffer = kmap_atomic(page, KM_USER0);
        s = (struct btrfs_super_block *)mapped_buffer;
        memcpy(on_disk_csum, s->csum, sdev->csum_size);

        if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
                ++fail;

        if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
                ++fail;

        if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
                ++fail;

        len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
        mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
        p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
        index = 0;
        for (;;) {
                u64 l = min_t(u64, len, mapped_size);

                crc = btrfs_csum_data(root, p, crc, l);
                kunmap_atomic(mapped_buffer, KM_USER0);
                len -= l;
                if (len == 0)
                        break;
                index++;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
                mapped_buffer = kmap_atomic(page, KM_USER0);
                mapped_size = PAGE_SIZE;
                p = mapped_buffer;
        }

        btrfs_csum_final(crc, calculated_csum);
        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
                ++fail;

        if (fail) {
                /*
                 * if we find an error in a super block, we just report it.
                 * They will get written with the next transaction commit
                 * anyway
                 */
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.super_errors;
                spin_unlock(&sdev->stat_lock);
        }

        return fail;
}
static void scrub_block_get(struct scrub_block *sblock)
{
        atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
        if (atomic_dec_and_test(&sblock->ref_count)) {
                int i;

                for (i = 0; i < sblock->page_count; i++)
                        if (sblock->pagev[i].page)
                                __free_page(sblock->pagev[i].page);
                kfree(sblock);
        }
}
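
/* submit the bio that is currently being filled, if any */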
static void scrub_submit(struct scrub_dev *sdev)
{
        struct scrub_bio *sbio;

        if (sdev->curr == -1)
                return;

        sbio = sdev->bios[sdev->curr];
        sdev->curr = -1;
        atomic_inc(&sdev->in_flight);

        btrfsic_submit_bio(READ, sbio->bio);
}
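
/*
 * queue one page for reading: append it to the bio currently being
 * filled, submitting that bio first if the page is not physically and
 * logically contiguous with it or if the bio is full
 */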
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
				 struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sdev->curr == -1) {
		spin_lock(&sdev->list_lock);
		sdev->curr = sdev->first_free;
		if (sdev->curr != -1) {
			sdev->first_free = sdev->bios[sdev->curr]->next_free;
			sdev->bios[sdev->curr]->next_free = -1;
			sdev->bios[sdev->curr]->page_count = 0;
			spin_unlock(&sdev->list_lock);
		} else {
			spin_unlock(&sdev->list_lock);
			wait_event(sdev->list_wait, sdev->first_free != -1);
		}
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		bio = sbio->bio;
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sdev->dev->bdev;
		bio->bi_sector = spage->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_submit(sdev);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sdev);
		goto again;
	}

	scrub_block_get(sblock); /* one for the added page */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sdev->pages_per_bio)
		scrub_submit(sdev);

	return 0;
}
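
/*
 * Split the range [logical, logical + len) into page-sized pieces, collect
 * them in a freshly allocated scrub_block and queue them for reading.
 * With force set, the bio is submitted immediately; scrub_supers() uses
 * this to read each super block copy on its own.
 */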
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
		       u64 physical, u64 flags, u64 gen, int mirror_num,
		       u8 *csum, int force)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.malloc_errors++;
		spin_unlock(&sdev->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sdev = sdev;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		u64 l = min_t(u64, len, PAGE_SIZE);

		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page) {
			spin_lock(&sdev->stat_lock);
			sdev->stat.malloc_errors++;
			spin_unlock(&sdev->stat_lock);
			while (index > 0) {
				index--;
				__free_page(sblock->pagev[index].page);
			}
			kfree(sblock);
			return -ENOMEM;
		}
		spage->sblock = sblock;
		spage->bdev = sdev->dev->bdev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sdev->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		len -= l;
		logical += l;
		physical += l;
	}

	BUG_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		int ret;

		ret = scrub_add_page_to_bio(sdev, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sdev);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
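
/*
 * Bio completion runs in interrupt context; stash the error and defer the
 * real work to the scrub worker thread pool.
 */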
static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}
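
/*
 * Worker half of bio completion: record I/O errors per page, complete the
 * scrub_blocks whose last outstanding page has arrived, then recycle the
 * scrub_bio and wake up anyone waiting for a free one.
 */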
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_dev *sdev = sbio->sdev;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}
	if (sbio->err) {
		/*
		 * Reset the bio to a pristine state: clear the error flags,
		 * mark it up to date again and restore the full page
		 * vectors that the block layer may have modified.  Since
		 * the bio is put right below, this is probably a leftover
		 * from a time when the bio was reused, but it is harmless.
		 */
		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bio->bi_phys_segments = 0;
		sbio->bio->bi_idx = 0;

		for (i = 0; i < sbio->page_count; i++) {
			struct bio_vec *bi;
			bi = &sbio->bio->bi_io_vec[i];
			bi->bv_offset = 0;
			bi->bv_len = PAGE_SIZE;
		}
	}
	bio_put(sbio->bio);
	sbio->bio = NULL;

	spin_lock(&sdev->list_lock);
	sbio->next_free = sdev->first_free;
	sdev->first_free = sbio->index;
	spin_unlock(&sdev->list_lock);
	atomic_dec(&sdev->in_flight);
	wake_up(&sdev->list_wait);
}
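
/*
 * Called once all pages of a block have completed: repair the block if an
 * I/O error was seen, otherwise verify its checksums.
 */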
static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen)
		scrub_handle_errored_block(sblock);
	else
		scrub_checksum(sblock);
}
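
/*
 * Look up the data checksum for the sector at @logical in the list that
 * was pre-filled by btrfs_lookup_csums_range().  Sums that end before
 * @logical are no longer needed and get discarded.  Returns 1 and copies
 * the checksum into @csum when one is found, 0 otherwise.
 */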
static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;

	while (!list_empty(&sdev->csum_list)) {
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sdev->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sdev->sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
			u64 physical, u64 flags, u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sdev->sectorsize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.data_extents_scrubbed++;
		sdev->stat.data_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		BUG_ON(sdev->nodesize != sdev->leafsize);
		blocksize = sdev->nodesize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.tree_extents_scrubbed++;
		sdev->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else {
		blocksize = sdev->sectorsize;
		BUG_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sdev, logical, l, csum);
			if (have_csum == 0)
				++sdev->stat.no_csum;
		}
		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}
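
/*
 * Scrub one stripe of a chunk on this device: compute the logical offset
 * and mirror number from the RAID layout, prefetch the relevant extent
 * and csum tree ranges, then walk the extent tree and hand every extent
 * that falls into the stripe to scrub_extent().  Pause and cancel
 * requests are honored between stripe lengths.
 */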
static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
	struct map_lookup *map, int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused so as not to hold off transaction commits.
	 */
	logical = base + offset;

	wait_event(sdev->list_wait,
		   atomic_read(&sdev->in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1 MB.
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sdev->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sdev);
			wait_event(sdev->list_wait,
				   atomic_read(&sdev->in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sdev->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sdev, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   flags, generation, mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sdev->stat_lock);
		sdev->stat.last_physical = physical;
		spin_unlock(&sdev->stat_lock);
	}

	/* push queued extents */
	scrub_submit(sdev);

out:
	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
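
/*
 * Map a chunk back to the stripe(s) it has on this device and scrub each
 * of them.
 */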
static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
	u64 dev_offset)
{
	struct btrfs_mapping_tree *map_tree =
		&sdev->dev->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = -EINVAL;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev == sdev->dev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
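
/*
 * Walk all dev extents of the device that intersect [start, end), take a
 * reference on the backing block group of each chunk to keep it from
 * going away, and scrub the chunk.
 */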
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = sdev->dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != sdev->dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset);
		btrfs_put_block_group(cache);
		if (ret)
			break;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
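
/*
 * Scrub all super block copies that fit on the device.  The generation of
 * the last committed transaction is used as the expected generation.
 */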
static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_device *device = sdev->dev;
	struct btrfs_root *root = device->dev_root;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
			break;

		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
		if (ret)
			return ret;
	}
	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start the workers if
 * necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
			fs_info->thread_pool_size, &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}
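
/*
 * Drop a reference on fs_info->scrub_workers and stop the workers when
 * the last reference is gone.
 */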
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0)
		btrfs_stop_workers(&fs_info->scrub_workers);
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}
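
/*
 * Entry point for scrubbing a single device: verify the size assumptions
 * the implementation relies on, set up the per-device scrub context and
 * run it over the super blocks and all chunks in [start, end).
 */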
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
		    struct btrfs_scrub_progress *progress, int readonly)
{
	struct scrub_dev *sdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(root->fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (root->nodesize != root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       root->nodesize, root->leafsize);
		return -EINVAL;
	}

	if (root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * The way scrub is implemented, it cannot calculate the
		 * checksum of a tree block that spans stripes.  Do not
		 * handle this situation at all because it won't ever
		 * happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize == PAGE_SIZE (%d == %llu) fails\n",
		       root->sectorsize, (unsigned long long)PAGE_SIZE);
		return -EINVAL;
	}

	ret = scrub_workers_get(root);
	if (ret)
		return ret;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev || dev->missing) {
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}

	if (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -EINPROGRESS;
	}
	sdev = scrub_setup_dev(dev);
	if (IS_ERR(sdev)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return PTR_ERR(sdev);
	}
	sdev->readonly = readonly;
	dev->scrub_device = sdev;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	down_read(&fs_info->scrub_super_lock);
	ret = scrub_supers(sdev);
	up_read(&fs_info->scrub_super_lock);

	if (!ret)
		ret = scrub_enumerate_chunks(sdev, start, end);

	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);

	if (progress)
		memcpy(progress, &sdev->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_dev(sdev);
	scrub_workers_put(root);

	return ret;
}
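
/*
 * btrfs_scrub_pause() blocks until all running scrubs have reached their
 * pause point; btrfs_scrub_continue() lets them resume.  This keeps scrub
 * from holding off transaction commits.
 */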
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}
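
/*
 * Cancel all scrubs running on the filesystem and wait until they have
 * actually stopped.  Returns -ENOTCONN when no scrub was running.
 */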
int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel(struct btrfs_root *root)
{
	return __btrfs_scrub_cancel(root->fs_info);
}

int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct scrub_dev *sdev;

	mutex_lock(&fs_info->scrub_lock);
	sdev = dev->scrub_device;
	if (!sdev) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sdev->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
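
/*
 * Cancel the scrub running on the device with the given devid, if any.
 */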
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(root, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}
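
/*
 * Copy the current progress statistics of the scrub on the given device
 * into @progress.  Returns -ENODEV when the device does not exist and
 * -ENOTCONN when no scrub is running on it.
 */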
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_dev *sdev = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (dev)
		sdev = dev->scrub_device;
	if (sdev)
		memcpy(progress, &sdev->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
}