/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dm-bio-list.h"
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256

static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);
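/*
 * Allocator for the r1bio mempool.  Each r1bio ends in a flexible
 * bios[] array with one slot per raid disk, so the allocation size is
 * computed with offsetof() against pool_info->raid_disks.  If the
 * allocation fails under memory pressure, unplugging the member queues
 * pushes out in-flight requests and helps free memory, so that a later
 * retry by mempool_alloc() is more likely to succeed.
 */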
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
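/*
 * Resync buffer geometry: each resync "block" is 64KB, i.e. 128
 * sectors (RESYNC_SECTORS), or 16 pages with 4KB pages (RESYNC_PAGES).
 * The 2MB RESYNC_WINDOW therefore corresponds to the 32 buffers that
 * init_resync() pre-allocates in r1buf_pool.
 */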
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i=0; i < RESYNC_PAGES ; i++)
		for (j=0 ; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}
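/*
 * Free a resync buffer.  This must mirror the sharing scheme used by
 * the allocator above: when the pages were shared across all bios,
 * only the copy attached to bios[0] is actually released (the j == 0 /
 * pointer-inequality test below avoids double-freeing shared pages).
 */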
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}
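/*
 * Note the pairing with the barrier code further down: normal I/O
 * takes a slot with wait_barrier() and releases it via allow_barrier()
 * in free_r1bio(), while resync I/O raises the barrier and drops it
 * via lower_barrier() in put_buf().
 */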
static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio, bio->bi_size,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	if (bio->bi_size)
		return 1;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate || (conf->raid_disks - conf->mddev->degraded) <= 1) {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		if (uptodate)
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		raid_end_bio_io(r1_bio);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	return 0;
}
static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	struct bio *to_put = NULL;

	if (bio->bi_size)
		return 1;

	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
		/* Don't rdev_dec_pending in this branch - keep it for the retry */
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */
			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, mbio->bi_size, 0);
				}
			}
		}
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	}
	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			/* it really is the end of this request */
			if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
				/* free extra copy of the data pages */
				int i = bio->bi_vcnt;
				while (i--)
					safe_put_page(bio->bi_io_vec[i].bv_page);
			}
			/* clear the bitmap if all writes complete successfully */
			bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
					r1_bio->sectors,
					!test_bit(R1BIO_Degraded, &r1_bio->state),
					behind);
			md_write_end(r1_bio->mddev);
			raid_end_bio_io(r1_bio);
		}
	}
	if (to_put)
		bio_put(to_put);
	return 0;
}
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}

	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
		     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:

	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}
static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			request_queue_t *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_write_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
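/*
 * A short worked example of the protocol above: while a resync holds
 * the barrier (barrier > 0), a new write entering wait_barrier()
 * bumps nr_waiting and sleeps.  raise_barrier() in turn refuses to
 * proceed while nr_waiting != 0, so a second resync pass cannot
 * starve that write; once lower_barrier() drops the count to zero,
 * the write takes its nr_pending slot and continues.
 */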
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until barrier+nr_pending match nr_queued+2.
	 * This is called in the context of one normal IO request
	 * that has failed (it has been taken off the retry list,
	 * so it is counted in nr_pending but not in nr_queued).
	 * The raised barrier and that one request account for the
	 * "+2"; the condition is therefore met exactly when every
	 * other pending request has either completed or been queued
	 * for re-handling.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}
/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
				      GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}
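/*
 * With these private copies in place, a write-behind request can
 * signal completion to the master bio as soon as the data is safe on
 * all non-WriteMostly disks, while writes to the WriteMostly
 * (typically slow) devices continue from the copied pages in the
 * background.  The copies are released in raid1_end_write_request()
 * once the last mirror write finishes; if the allocation here fails,
 * the write simply proceeds synchronously from the original pages.
 */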
static int make_request(request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	mdk_rdev_t *rdev;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	const int do_sync = bio_sync(bio);
	int do_barriers;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 * We test barriers_work *after* md_write_start as md_write_start
	 * may cause the first superblock write, and that will check out
	 * if barriers work.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	wait_barrier(conf);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
	rcu_read_lock();
	for (i = 0;  i < disks; i++) {
		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_barrier(bio);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers | do_sync;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
				test_bit(R1BIO_BehindIO, &r1_bio->state));
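	/*
	 * The clones are not submitted directly.  They are queued on
	 * conf->pending_bio_list under device_lock and the queue is
	 * plugged; raid1d picks them up, flushes any pending bitmap
	 * updates to disk first, and only then calls
	 * generic_make_request() on each (see raid1d below).
	 */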
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	if (do_sync)
		md_wakeup_thread(mddev->thread);
#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}
static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1)
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive
		 */
		return;
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
	       "	Operation continuing on %d devices\n",
	       bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}
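/*
 * close_sync() relies on the barrier mechanism for its quiescing:
 * wait_barrier() cannot return while any resync activity still holds
 * the barrier, so briefly taking and releasing a normal-IO slot
 * guarantees the sync has drained before r1buf_pool is destroyed.
 */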
static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}

	print_conf(conf);
	return 0;
}

static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int found = 0;
	int mirror = 0;
	mirror_info_t *p;

	for (mirror=0; mirror < mddev->raid_disks; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sector to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			found = 1;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return found;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:
	print_conf(conf);
	return err;
}
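/*
 * The removal above follows the usual RCU-removal pattern: p->rdev is
 * cleared first, synchronize_rcu() waits out any reader that obtained
 * the old pointer under rcu_read_lock(), and a second nr_pending check
 * catches a reader that took a reference in that window, in which case
 * the removal is rolled back with -EBUSY.
 */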
static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
{
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	if (bio->bi_size)
		return 1;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
	return 0;
}

static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror=0;

	if (bio->bi_size)
		return 1;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		int sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	}

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
	return 0;
}
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We have read all readable devices.  If we haven't
		 * got the block, then there is no hope left.
		 * If we have, then we want to do a comparison
		 * and skip the write if everything is the same.
		 * If any blocks failed to read, then we need to
		 * attempt an over-write
		 */
		int primary;
		if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			for (i=0; i<mddev->raid_disks; i++)
				if (r1_bio->bios[i]->bi_end_io == end_sync_read)
					md_error(mddev, conf->mirrors[i].rdev);

			md_done_sync(mddev, r1_bio->sectors, 1);
			put_buf(r1_bio);
			return;
		}
		for (primary=0; primary<mddev->raid_disks; primary++)
			if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
				r1_bio->bios[primary]->bi_end_io = NULL;
				rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
				break;
			}
		r1_bio->read_disk = primary;
		for (i=0; i<mddev->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
				int j;
				int vcnt = r1_bio->sectors >> (PAGE_SHIFT - 9);
				struct bio *pbio = r1_bio->bios[primary];
				struct bio *sbio = r1_bio->bios[i];

				for (j = vcnt; j-- ; )
					if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
						   page_address(sbio->bi_io_vec[j].bv_page),
						   PAGE_SIZE))
						break;
				if (j >= 0)
					mddev->resync_mismatches += r1_bio->sectors;
				if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
					sbio->bi_end_io = NULL;
					rdev_dec_pending(conf->mirrors[i].rdev, mddev);
				} else {
					/* fixup the bio for reuse */
					sbio->bi_vcnt = vcnt;
					sbio->bi_size = r1_bio->sectors << 9;
					sbio->bi_idx = 0;
					sbio->bi_phys_segments = 0;
					sbio->bi_hw_segments = 0;
					sbio->bi_hw_front_size = 0;
					sbio->bi_hw_back_size = 0;
					sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
					sbio->bi_flags |= 1 << BIO_UPTODATE;
					sbio->bi_next = NULL;
					sbio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
					for (j = 0; j < vcnt ; j++)
						memcpy(page_address(sbio->bi_io_vec[j].bv_page),
						       page_address(pbio->bi_io_vec[j].bv_page),
						       PAGE_SIZE);
				}
			}
	}
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* ouch - failed to read all of that.
		 * Try some synchronous reads of other devices to get
		 * good data, much like with normal read errors.  Only
		 * read into the pages we already have so we don't
		 * need to re-issue the read request.
		 * We don't need to freeze the array, because being in an
		 * active sync request, there is no normal IO, and
		 * no overlapping syncs.
		 */
		sector_t sect = r1_bio->sector;
		int sectors = r1_bio->sectors;
		int idx = 0;

		while(sectors) {
			int s = sectors;
			int d = r1_bio->read_disk;
			int success = 0;
			mdk_rdev_t *rdev;

			if (s > (PAGE_SIZE>>9))
				s = PAGE_SIZE >> 9;
			do {
				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
					/* No rcu protection needed here; devices
					 * can only be removed when no resync is
					 * active, and resync is currently active
					 */
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ)) {
						success = 1;
						break;
					}
				}
				d++;
				if (d == conf->raid_disks)
					d = 0;
			} while (!success && d != r1_bio->read_disk);

			if (success) {
				int start = d;
				/* write it back and re-read */
				set_bit(R1BIO_Uptodate, &r1_bio->state);
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					atomic_add(s, &rdev->corrected_errors);
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 WRITE) == 0)
						md_error(mddev, rdev);
				}
				d = start;
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ) == 0)
						md_error(mddev, rdev);
				}
			} else {
				char b[BDEVNAME_SIZE];
				/* Cannot read from anywhere, array is toast */
				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
				       " for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return;
			}
			sectors -= s;
			sect += s;
			idx ++;
		}
	}

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}
/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(conf_t *conf, int read_disk,
			   sector_t sect, int sectors)
{
	mddev_t *mddev = conf->mddev;
	while(sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		mdk_rdev_t *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags) &&
			    sync_page_io(rdev->bdev,
					 sect + rdev->data_offset,
					 s<<9,
					 conf->tmppage, READ))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere -- bye bye array */
			md_error(mddev, conf->mirrors[read_disk].rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev->bdev,
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage, WRITE)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
			}
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev->bdev,
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage, READ)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
				else {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
								    rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}
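/*
 * fix_read_error() works in chunks of at most one page via
 * conf->tmppage: first find an In_sync mirror that can supply the
 * data, then write that data back over the bad region on every other
 * In_sync device (including the one that originally failed), and
 * finally re-read from each of those to verify.  A device that fails
 * the write-back or the verifying read is kicked via md_error();
 * successful corrections bump the rdev's corrected_errors count.
 */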
  1307. static void raid1d(mddev_t *mddev)
  1308. {
  1309. r1bio_t *r1_bio;
  1310. struct bio *bio;
  1311. unsigned long flags;
  1312. conf_t *conf = mddev_to_conf(mddev);
  1313. struct list_head *head = &conf->retry_list;
  1314. int unplug=0;
  1315. mdk_rdev_t *rdev;
  1316. md_check_recovery(mddev);
  1317. for (;;) {
  1318. char b[BDEVNAME_SIZE];
  1319. spin_lock_irqsave(&conf->device_lock, flags);
  1320. if (conf->pending_bio_list.head) {
  1321. bio = bio_list_get(&conf->pending_bio_list);
  1322. blk_remove_plug(mddev->queue);
  1323. spin_unlock_irqrestore(&conf->device_lock, flags);
  1324. /* flush any pending bitmap writes to disk before proceeding w/ I/O */
  1325. if (bitmap_unplug(mddev->bitmap) != 0)
  1326. printk("%s: bitmap file write failed!\n", mdname(mddev));
  1327. while (bio) { /* submit pending writes */
  1328. struct bio *next = bio->bi_next;
  1329. bio->bi_next = NULL;
  1330. generic_make_request(bio);
  1331. bio = next;
  1332. }
  1333. unplug = 1;
  1334. continue;
  1335. }
  1336. if (list_empty(head))
  1337. break;
  1338. r1_bio = list_entry(head->prev, r1bio_t, retry_list);
  1339. list_del(head->prev);
  1340. conf->nr_queued--;
  1341. spin_unlock_irqrestore(&conf->device_lock, flags);
  1342. mddev = r1_bio->mddev;
  1343. conf = mddev_to_conf(mddev);
  1344. if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
  1345. sync_request_write(mddev, r1_bio);
  1346. unplug = 1;
  1347. } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
  1348. /* some requests in the r1bio were BIO_RW_BARRIER
  1349. * requests which failed with -EOPNOTSUPP. Hohumm..
  1350. * Better resubmit without the barrier.
  1351. * We know which devices to resubmit for, because
  1352. * all others have had their bios[] entry cleared.
  1353. * We already have a nr_pending reference on these rdevs.
  1354. */
  1355. int i;
  1356. const int do_sync = bio_sync(r1_bio->master_bio);
  1357. clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
  1358. clear_bit(R1BIO_Barrier, &r1_bio->state);
  1359. for (i=0; i < conf->raid_disks; i++)
  1360. if (r1_bio->bios[i])
  1361. atomic_inc(&r1_bio->remaining);
  1362. for (i=0; i < conf->raid_disks; i++)
  1363. if (r1_bio->bios[i]) {
  1364. struct bio_vec *bvec;
  1365. int j;
  1366. bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
  1367. /* copy pages from the failed bio, as
  1368. * this might be a write-behind device */
  1369. __bio_for_each_segment(bvec, bio, j, 0)
  1370. bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
  1371. bio_put(r1_bio->bios[i]);
  1372. bio->bi_sector = r1_bio->sector +
  1373. conf->mirrors[i].rdev->data_offset;
  1374. bio->bi_bdev = conf->mirrors[i].rdev->bdev;
  1375. bio->bi_end_io = raid1_end_write_request;
  1376. bio->bi_rw = WRITE | do_sync;
  1377. bio->bi_private = r1_bio;
  1378. r1_bio->bios[i] = bio;
  1379. generic_make_request(bio);
  1380. }
  1381. } else {
			int disk;

			/* We got a read error.  Maybe the drive is bad, or
			 * maybe just this block is bad and we can fix it.
			 * We freeze all other IO, try reading the block from
			 * other devices and, when we find a good copy,
			 * re-write the failed block and re-read to check
			 * that the error is fixed.
			 * This is all done synchronously while the array is
			 * frozen.
			 */
			if (mddev->ro == 0) {
				freeze_array(conf);
				fix_read_error(conf, r1_bio->read_disk,
					       r1_bio->sector,
					       r1_bio->sectors);
				unfreeze_array(conf);
			}

			bio = r1_bio->bios[r1_bio->read_disk];
			if ((disk = read_balance(conf, r1_bio)) == -1) {
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev, b),
				       (unsigned long long)r1_bio->sector);
				raid_end_bio_io(r1_bio);
			} else {
				const int do_sync = bio_sync(r1_bio->master_bio);
				r1_bio->bios[r1_bio->read_disk] =
					mddev->ro ? IO_BLOCKED : NULL;
				r1_bio->read_disk = disk;
				bio_put(bio);
				bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
				r1_bio->bios[r1_bio->read_disk] = bio;
				rdev = conf->mirrors[disk].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev, b),
					       (unsigned long long)r1_bio->sector);
				bio->bi_sector = r1_bio->sector + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_end_io = raid1_end_read_request;
				bio->bi_rw = READ | do_sync;
				bio->bi_private = r1_bio;
				unplug = 1;
				generic_make_request(bio);
			}
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
	if (unplug)
		unplug_slaves(mddev);
}

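/*
 * init_resync() below sizes the resync buffer pool: one r1bio (with its
 * attached pages) per RESYNC_BLOCK_SIZE chunk of the RESYNC_WINDOW, both
 * defined earlier in this file.  A rough sketch of the arithmetic,
 * assuming the traditional values of a 2MB window and 64KB blocks (check
 * the definitions above for the real numbers):
 *
 *	buffs = (2048 * 1024) / (64 * 1024);	/- == 32 buffers -/
 */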
static int init_resync(conf_t *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
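/*
 * A minimal sketch of that barrier protocol, in terms of the helpers
 * defined earlier in this file (locking details omitted; see the
 * helpers themselves):
 *
 *	wait_barrier(conf);	- normal I/O: block while a barrier is
 *				  raised, then count into nr_pending
 *	...submit normal I/O...
 *	allow_barrier(conf);	- drop the nr_pending reference
 *
 *	raise_barrier(conf);	- resync: wait for pending I/O to drain,
 *				  then exclude new normal I/O
 *	...build and submit the resync r1bio below...
 *	lower_barrier(conf);	- paired here via put_buf()
 */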
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	conf_t *conf = mddev_to_conf(mddev);
	r1bio_t *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	int sync_blocks;
	int still_degraded = 0;

	if (!conf->r1buf_pool) {
		/* printk("sync start - bitmap %p\n", mddev->bitmap); */
		if (init_resync(conf))
			return 0;
	}

	max_sector = mddev->size << 1;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync.
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}

	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything.
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}

	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	raise_barrier(conf);

	conf->next_resync = sector_nr;

	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	rcu_read_lock();

	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */
	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev;
		bio = r1_bio->bios[i];

		/* take from bio_init */
		bio->bi_next = NULL;
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_rw = READ;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_hw_segments = 0;
		bio->bi_size = 0;
		bio->bi_end_io = NULL;
		bio->bi_private = NULL;

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			still_degraded = 1;
			continue;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			bio->bi_rw = READ;
			bio->bi_end_io = end_sync_read;
			if (test_bit(WriteMostly, &rdev->flags)) {
				if (wonly < 0)
					wonly = i;
			} else {
				if (disk < 0)
					disk = i;
			}
			read_targets++;
		}
		atomic_inc(&rdev->nr_pending);
		bio->bi_sector = sector_nr + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_private = r1_bio;
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets - 1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

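	/*
	 * The loop below fills every active bio with the same pages, one
	 * PAGE at a time, trimming at the device end and at bitmap-chunk
	 * boundaries.  If bio_add_page() refuses a page on any device
	 * (queue restrictions), the page is backed out of every bio filled
	 * so far and what we have is submitted (the bio_full label).
	 */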
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if (len > (sync_blocks<<9))
				len = sync_blocks<<9;
		}

		for (i = 0; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io == NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1 << BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);
	}
	return nr_sectors;
}

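/*
 * run() below is this personality's start-up hook, called by the md
 * core when the array is assembled.  Everything allocated here must be
 * released in stop() further down, as the bookkeeping comment inside
 * also notes.
 */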
static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, j, disk_idx;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->level != 1) {
		printk("raid1: %s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	if (mddev->reshape_position != MaxSector) {
		printk("raid1: %s: reshape_position set but not supported\n",
		       mdname(mddev));
		goto out;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf)
		goto out_no_mem;

	conf->mirrors = kzalloc(sizeof(struct mirror_info) * mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out_no_mem;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out_no_mem;

	conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto out_no_mem;
	conf->poolinfo->mddev = mddev;
	conf->poolinfo->raid_disks = mddev->raid_disks;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto out_no_mem;

	ITERATE_RDEV(mddev, rdev, tmp) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;

		blk_queue_stack_limits(mddev->queue,
				       rdev->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		disk->head_position = 0;
	}

	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	bio_list_init(&conf->flushing_bio_list);

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
			conf->fullsync = 1;
		}
	}
	if (mddev->degraded == conf->raid_disks) {
		printk(KERN_ERR "raid1: no operational mirrors for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	/*
	 * find the first working one and use it as a starting point
	 * for read balancing.
	 */
	for (j = 0; j < conf->raid_disks &&
		     (!conf->mirrors[j].rdev ||
		      !test_bit(In_sync, &conf->mirrors[j].rdev->flags)); j++)
		/* nothing */;
	conf->last_used = j;

	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid1: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	printk(KERN_INFO
	       "raid1: raid set %s active with %d out of %d mirrors\n",
	       mdname(mddev), mddev->raid_disks - mddev->degraded,
	       mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->array_size = mddev->size;

	mddev->queue->unplug_fn = raid1_unplug;
	mddev->queue->issue_flush_fn = raid1_issue_flush;
	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	return 0;

out_no_mem:
	printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
	       mdname(mddev));

out_free_conf:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
		mddev->private = NULL;
	}
out:
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	struct bitmap *bitmap = mddev->bitmap;
	int behind_wait = 0;

	/* wait for behind writes to complete */
	while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		behind_wait++;
		printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n",
		       mdname(mddev), behind_wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ); /* wait a second */
		/* need to kick something here to make sure I/O goes? */
	}

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	kfree(conf->poolinfo);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

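/*
 * A note on units in raid1_resize() below: 'sectors' is the new
 * per-device data size in 512-byte sectors, while mddev->size and
 * mddev->array_size are kept in KB (as the shifts imply), hence the
 * >>1 / <<1 conversions.  For example, growing to sectors == 4096:
 *
 *	mddev->array_size = 4096 >> 1;		- 2048 KB
 *	set_capacity(..., 2048 << 1);		- back to 4096 sectors
 */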
static int raid1_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	mddev->array_size = sectors>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = mddev->array_size;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(mddev_t *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	mirror_info_t *newmirrors;
	conf_t *conf = mddev_to_conf(mddev);
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_size != mddev->new_chunk ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk = mddev->chunk_size;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++)
		if (conf->mirrors[d].rdev) {
			conf->mirrors[d].rdev->raid_disk = d2;
			newmirrors[d2++].rdev = conf->mirrors[d].rdev;
		}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	conf->last_used = 0; /* just make sure it is in-range */
	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

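/*
 * raid1_reshape() above is wired up as ->check_reshape in the
 * personality table below.  Presumably the usual route here is a change
 * to the raid_disks count (e.g. mdadm --grow --raid-devices=N), which
 * sets mddev->delta_disks before the md core invokes this hook.
 */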
static void raid1_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev_to_conf(mddev);

	switch (state) {
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

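/*
 * raid1_quiesce() above gives the md core a way to suspend (state 1)
 * and resume (state 0) all array I/O via the resync barrier, e.g. for
 * operations that need a stable array such as bitmap changes.
 */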
static struct mdk_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk = raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");