raid1.c
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf);
static void lower_barrier(struct r1conf *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
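
/*
 * Arithmetic note (assuming 4 KB pages): a 64 KB resync block is
 * RESYNC_SECTORS = 128 sectors of 512 bytes, and RESYNC_PAGES = 16
 * pages per bio.  RESYNC_WINDOW (2 MB) appears to be the "resync
 * window" that read_balance() below consults when deciding whether
 * read balancing is safe while a resync is in progress.
 */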
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	struct r1bio *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while (j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			bio->bi_vcnt = i+1;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i = 0; i < RESYNC_PAGES; i++)
			for (j = 1; j < pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (j = 0; j < pi->raid_disks; j++)
		for (i = 0; i < r1_bio->bios[j]->bi_vcnt; i++)
			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}
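
/*
 * The page sharing above is deliberate: for a normal resync only the
 * read bio needs real data pages, so the write bios can point at the
 * same pages.  A user-requested check/repair must read every device
 * independently so the copies can be compared, hence the per-bio page
 * allocation in that case.
 */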
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i, j;
	struct r1bio *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i = 0; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	struct r1conf *conf = r1_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
}
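
/*
 * Note that bio->bi_phys_segments is repurposed here: when a master bio
 * had to be split into several r1bios (e.g. to work around bad blocks)
 * it counts the r1bios still outstanding, and the master is completed
 * only when that count reaches zero.  See the comment in make_request().
 */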
static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_sector,
			 (unsigned long long) bio->bi_sector +
			 (bio->bi_size >> 9) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(
			KERN_ERR "md/raid1:%s: %s: "
			"rescheduling sector %llu\n",
			mdname(conf->mddev),
			bdevname(conf->mirrors[mirror].rdev->bdev,
				 b),
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		/* free extra copy of the data pages */
		int i = r1_bio->behind_page_count;
		while (i--)
			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
		kfree(r1_bio->behind_bvecs);
		r1_bio->behind_bvecs = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;

	mirror = find_bio_disk(r1_bio, bio);

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(conf->mirrors[mirror].rdev,
				r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors)) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_sector,
					 (unsigned long long) mbio->bi_sector +
					 (mbio->bi_size >> 9) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(conf->mirrors[mirror].rdev,
				 conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int start_disk;
	int best_disk;
	int i;
	sector_t best_dist;
	struct md_rdev *rdev;
	int choose_first;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist = MaxSector;
	best_good_sectors = 0;

	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		choose_first = 1;
		start_disk = 0;
	} else {
		choose_first = 0;
		start_disk = conf->last_used;
	}

	for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;

		int disk = start_disk + i;
		if (disk >= conf->raid_disks * 2)
			disk -= conf->raid_disks * 2;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Unmerged, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad < this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first
		    /* Don't change to another disk for sequential reads */
		    || conf->next_seq_sect == this_sector
		    || dist == 0
		    /* If device is idle, use it */
		    || atomic_read(&rdev->nr_pending) == 0) {
			best_disk = disk;
			break;
		}
		if (dist < best_dist) {
			best_dist = dist;
			best_disk = disk;
		}
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		sectors = best_good_sectors;
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = best_disk;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
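
/*
 * Caller contract, as used by make_request() below (and, presumably,
 * the read-error retry path later in the file): read_balance() returns
 * a disk index or -1 if nothing is readable, trims *max_sectors to what
 * the chosen disk can supply before its first bad block, and has
 * already raised nr_pending on the chosen rdev, so the caller must
 * eventually call rdev_dec_pending().
 */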
static int raid1_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r1conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		int disk;
		rcu_read_lock();
		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = sector +
						rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}

int md_raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid1_congested);
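
/*
 * Together with max_queued_requests (default 1024) this implements the
 * back-pressure described at the top of the file: once that many writes
 * are queued for the raid1 thread, the array reports itself congested
 * and writeback backs off until the queue drains.
 */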
static int raid1_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid1_congested(mddev, bits);
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}
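
/*
 * The ordering here matters: bitmap_unplug() pushes the dirty bitmap
 * bits to stable storage before the data writes are submitted, so that
 * after a crash every region with a possibly in-flight write is still
 * marked dirty and will be resynced.
 */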
/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
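/*
 * Usage sketch (illustrative only):
 *
 *	wait_barrier(conf);	... submit one unit of normal IO ...
 *	allow_barrier(conf);	   (from the IO completion path)
 *
 *	raise_barrier(conf);	... run one unit of resync IO ...
 *	lower_barrier(conf);	   (e.g. from put_buf() above)
 */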
#define RESYNC_DEPTH 32

static void raise_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock, );

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock, );

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock,
			);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r1conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r1conf *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
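
/*
 * Worked example of the freeze invariant: if three writes are in
 * flight (nr_pending == 3) and two of them have failed and been queued
 * for retry (nr_queued == 2), the one remaining request is the caller
 * itself, so nr_pending == nr_queued + 1 and freeze_array() proceeds.
 */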
static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

/* duplicate the data pages for behind I/O
 */
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
					GFP_NOIO);
	if (unlikely(!bvecs))
		return;

	bio_for_each_segment(bvec, bio, i) {
		bvecs[i] = *bvec;
		bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bvecs[i].bv_page))
			goto do_sync_io;
		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(bvecs[i].bv_page);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_bvecs = bvecs;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (bvecs[i].bv_page)
			put_page(bvecs[i].bv_page);
	kfree(bvecs);
	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}
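
/*
 * The copies made above let a write-behind target (a WriteMostly
 * device) complete at its own pace: the master bio can be acknowledged
 * once all non-write-mostly disks are safe, while the laggard writes
 * out of these private pages.  If the allocation fails we simply fall
 * back to fully synchronous writes.
 */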
static void make_request(struct mddev *mddev, struct bio * bio)
{
	struct r1conf *conf = mddev->private;
	struct mirror_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	int i, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	struct md_rdev *blocked_rdev;
	int first_clone;
	int sectors_handled;
	int max_sectors;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
	    bio->bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
			    bio->bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
			    max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */
			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_sector + sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}
	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)
		    || test_bit(Unmerged, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged
				 */
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r1_bio->sectors) {
		/* We are splitting this write into multiple parts, so
		 * we need to prepare for allocating another r1_bio.
		 */
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait))
				alloc_behind_pages(mbio, r1_bio);

			bitmap_startwrite(bitmap, r1_bio->sector,
					  r1_bio->sectors,
					  test_bit(R1BIO_BehindIO,
						   &r1_bio->state));
			first_clone = 0;
		}
		if (r1_bio->behind_bvecs) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_sector = (r1_bio->sector +
				   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_flush_fua | do_sync;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		conf->pending_count++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (!mddev_check_plugged(mddev))
			md_wakeup_thread(mddev->thread);
	}
	/* Mustn't call r1_bio_write_done before this next test,
	 * as it could result in the bio being freed.
	 */
	if (sectors_handled < (bio->bi_size >> 9)) {
		r1_bio_write_done(r1_bio);
		/* We need another r1_bio.  It has already been counted
		 * in bio->bi_phys_segments
		 */
		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
		r1_bio->master_bio = bio;
		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_sector + sectors_handled;
		goto retry_write;
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}
static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}
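
/*
 * This produces the familiar /proc/mdstat summary: a healthy two-disk
 * mirror prints " [2/2] [UU]", and the same array with one failed
 * member prints " [2/1] [U_]".
 */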
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
	       "md/raid1:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(struct r1conf *conf)
{
	int i;

	printk(KERN_DEBUG "RAID1 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev, b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}
static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct mirror_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;
	struct request_queue *q = bdev_get_queue(rdev->bdev);

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (q->merge_bvec_fn) {
		set_bit(Unmerged, &rdev->flags);
		mddev->merge_check_needed = 1;
	}

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors + mirror;
		if (!p->rdev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
		/* Some requests might not have seen this new
		 * merge_bvec_fn.  We must wait for them to complete
		 * before merging the device fully.
		 * First we make sure any code which has tested
		 * our function has submitted the request, then
		 * we wait for all outstanding requests to complete.
		 */
		synchronize_sched();
		raise_barrier(conf);
		lower_barrier(conf);
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
	print_conf(conf);
	return err;
}
static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct mirror_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	print_conf(conf);
	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement.  We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			raise_barrier(conf);
			clear_bit(Replacement, &repl->flags);
			p->rdev = repl;
			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			lower_barrier(conf);
			clear_bit(WantReplacement, &rdev->flags);
		} else
			clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
abort:
	print_conf(conf);
	return err;
}
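
/* Completion handler for the reads issued during resync/recovery. */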
static void end_sync_read(struct bio *bio, int error)
{
	struct r1bio *r1_bio = bio->bi_private;

	update_head_pos(r1_bio->read_disk, r1_bio);

	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}
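
/* Completion handler for the writes issued during resync/recovery. */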
static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int mirror = 0;
	sector_t first_bad;
	int bad_sectors;

	mirror = find_bio_disk(r1_bio, bio);

	if (!uptodate) {
		sector_t sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED,
				&mddev->recovery);
		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else if (is_badblock(conf->mirrors[mirror].rdev,
			       r1_bio->sector,
			       r1_bio->sectors,
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				r1_bio->sector,
				r1_bio->sectors,
				&first_bad, &bad_sectors)
		)
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
	}
}
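
/*
 * Synchronously read or write one range of sectors.  On a failed write
 * flag the device for replacement; in either case record a bad block,
 * failing the whole device if even that is not possible.
 */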
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			   int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED,
				&rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}
static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;

	while (sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		struct md_rdev *rdev;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here; devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 bio->bi_io_vec[idx].bv_page,
						 READ, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
			       " for block %llu\n",
			       mdname(mddev),
			       bdevname(bio->bi_bdev, b),
			       (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	return 1;
}
static int process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		int size;

		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
			continue;

		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
			for (j = vcnt; j-- ; ) {
				struct page *p, *s;
				p = pbio->bi_io_vec[j].bv_page;
				s = sbio->bi_io_vec[j].bv_page;
				if (memcmp(page_address(p),
					   page_address(s),
					   sbio->bi_io_vec[j].bv_len))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			mddev->resync_mismatches += r1_bio->sectors;
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}
		/* fixup the bio for reuse */
		sbio->bi_vcnt = vcnt;
		sbio->bi_size = r1_bio->sectors << 9;
		sbio->bi_idx = 0;
		sbio->bi_phys_segments = 0;
		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bi_next = NULL;
		sbio->bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		size = sbio->bi_size;
		for (j = 0; j < vcnt ; j++) {
			struct bio_vec *bi;
			bi = &sbio->bi_io_vec[j];
			bi->bv_offset = 0;
			if (size > PAGE_SIZE)
				bi->bv_len = PAGE_SIZE;
			else
				bi->bv_len = size;
			size -= PAGE_SIZE;
			memcpy(page_address(bi->bv_page),
			       page_address(pbio->bi_io_vec[j].bv_page),
			       PAGE_SIZE);
		}
	}
	return 0;
}
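
/*
 * The resync read has completed (and been repaired or compared as
 * needed); now issue the corresponding writes to the out-of-sync or
 * mismatched devices.
 */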
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		if (process_checks(r1_bio) < 0)
			return;
	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, 1);
		}
	}
}
/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while (sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			sector_t first_bad;
			int bad_sectors;

			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0 &&
			    sync_page_io(rdev, sect, s<<9,
					 conf->tmppage, READ, false))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks * 2)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags))
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "md/raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
							rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}
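
/*
 * Minimal local helper: submit a bio and sleep until it completes,
 * returning its BIO_UPTODATE status.
 */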
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

static int submit_bio_wait(int rw, struct bio *bio)
{
	struct completion event;
	rw |= REQ_SYNC;

	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	return test_bit(BIO_UPTODATE, &bio->bi_flags);
}
static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;
	int vcnt, idx;
	struct bio_vec *vec;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = 1 << rdev->badblocks.shift;
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		vcnt = r1_bio->behind_page_count;
		vec = r1_bio->behind_bvecs;
		idx = 0;
		while (vec[idx].bv_page == NULL)
			idx++;
	} else {
		vcnt = r1_bio->master_bio->bi_vcnt;
		vec = r1_bio->master_bio->bi_io_vec;
		idx = r1_bio->master_bio->bi_idx;
	}
	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors'*/

		wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
		memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
		wbio->bi_sector = r1_bio->sector;
		wbio->bi_rw = WRITE;
		wbio->bi_vcnt = vcnt;
		wbio->bi_size = r1_bio->sectors << 9;
		wbio->bi_idx = idx;

		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_sector += rdev->data_offset;
		wbio->bi_bdev = rdev->bdev;
		if (submit_bio_wait(WRITE, wbio) == 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}
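
/*
 * All resync writes for this r1_bio have finished: update each
 * device's bad-block list to match the outcome, then account the
 * completed sync range.
 */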
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}
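
/*
 * All normal writes for this r1_bio have finished: clear bad blocks
 * that were successfully over-written, and narrow down any write
 * errors to precise bad-block records.
 */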
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		close_write(r1_bio);
	raid_end_bio_io(r1_bio);
}
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	int disk;
	int max_sectors;
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error.  Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write it and check
	 * whether that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */
	if (mddev->ro == 0) {
		freeze_array(conf);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);

	bio = r1_bio->bios[r1_bio->read_disk];
	bdevname(bio->bi_bdev, b);
read_more:
	disk = read_balance(conf, r1_bio, &max_sectors);
	if (disk == -1) {
		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
		raid_end_bio_io(r1_bio);
	} else {
		const unsigned long do_sync
			= r1_bio->master_bio->bi_rw & REQ_SYNC;
		if (bio) {
			r1_bio->bios[r1_bio->read_disk] =
				mddev->ro ? IO_BLOCKED : NULL;
			bio_put(bio);
		}
		r1_bio->read_disk = disk;
		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
		r1_bio->bios[r1_bio->read_disk] = bio;
		rdev = conf->mirrors[disk].rdev;
		printk_ratelimited(KERN_ERR
				   "md/raid1:%s: redirecting sector %llu"
				   " to other mirror: %s\n",
				   mdname(mddev),
				   (unsigned long long)r1_bio->sector,
				   bdevname(rdev->bdev, b));
		bio->bi_sector = r1_bio->sector + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_end_io = raid1_end_read_request;
		bio->bi_rw = READ | do_sync;
		bio->bi_private = r1_bio;
		if (max_sectors < r1_bio->sectors) {
			/* Drat - have to split this up more */
			struct bio *mbio = r1_bio->master_bio;
			int sectors_handled = (r1_bio->sector + max_sectors
					       - mbio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (mbio->bi_phys_segments == 0)
				mbio->bi_phys_segments = 2;
			else
				mbio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			generic_make_request(bio);
			bio = NULL;

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = mbio;
			r1_bio->sectors = (mbio->bi_size >> 9)
					  - sectors_handled;
			r1_bio->state = 0;
			set_bit(R1BIO_ReadError, &r1_bio->state);
			r1_bio->mddev = mddev;
			r1_bio->sector = mbio->bi_sector + sectors_handled;

			goto read_more;
		} else
			generic_make_request(bio);
	}
}
static void raid1d(struct mddev *mddev)
{
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		if (atomic_read(&mddev->plug_cnt) == 0)
			flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			/* just a partial read to be scheduled from separate
			 * context
			 */
			generic_make_request(r1_bio->bios[r1_bio->read_disk]);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}
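
/* Allocate the pool of resync buffers used by sync_request(). */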
static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */

	if (!conf->r1buf_pool)
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	raise_barrier(conf);

	conf->next_resync = sector_nr;

	rcu_read_lock();
	/*
	 * If we get a correctably read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];

		/* take from bio_init */
		bio->bi_next = NULL;
		bio->bi_flags &= ~(BIO_POOL_MASK-1);
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_rw = READ;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_size = 0;
		bio->bi_end_io = NULL;
		bio->bi_private = NULL;

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_rw = READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_sector = sector_nr + rdev->data_offset;
			bio->bi_bdev = rdev->bdev;
			bio->bi_private = r1_bio;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;
	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io == NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1 << BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);
	}
	return nr_sectors;
}
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}
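
/*
 * Allocate and populate the per-array r1conf, including the r1bio
 * mempool, the mirrors array and the raid1d thread.
 */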
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct mirror_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)
				* mddev->raid_disks * 2,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		struct request_queue *q;
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + conf->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		q = bdev_get_queue(rdev->bdev);
		if (q->merge_bvec_fn)
			mddev->merge_check_needed = 1;

		disk->head_position = 0;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	conf->last_used = -1;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		} else if (conf->last_used < 0)
			/*
			 * The first working device is used as a
			 * starting point to read balancing.
			 */
			conf->last_used = i;
	}

	if (conf->last_used < 0) {
		printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
		       mdname(mddev));
		goto abort;
	}
	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid1:%s: couldn't allocate thread\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
	}
	return ERR_PTR(err);
}
static int stop(struct mddev *mddev);
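
/*
 * Activate the array: build the r1conf (unless a takeover already
 * provided one), count degraded devices and hook up the queue.
 */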
static int run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;

	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
		       mdname(mddev));
		return -EIO;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid1:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
	       "md/raid1:%s: active with %d out of %d mirrors\n",
	       mdname(mddev), mddev->raid_disks - mddev->degraded,
	       mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;
		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
	}

	ret = md_integrity_register(mddev);
	if (ret)
		stop(mddev);
	return ret;
}
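
/*
 * Shut the array down: wait for any behind writes, drain IO via the
 * barrier, stop raid1d and free the configuration.
 */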
static int stop(struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
		       mdname(mddev));
		/* need to kick something here to make sure I/O goes? */
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	raise_barrier(conf);
	lower_barrier(conf);

	md_unregister_thread(&mddev->thread);
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	kfree(conf->poolinfo);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	struct mirror_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2, err;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	err = md_allow_write(mddev);
	if (err)
		return err;

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING
				       "md/raid1:%s: cannot register rd%d\n",
				       mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	conf->last_used = 0; /* just make sure it is in-range */
	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}
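
/*
 * Quiesce via the resync barrier: state 1 blocks all new IO, state 0
 * releases it again, state 2 just wakes any waiters for suspend.
 */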
static void raid1_quiesce(struct mddev *mddev, int state)
{
	struct r1conf *conf = mddev->private;

	switch (state) {
	case 2: /* wake for suspend */
		wake_up(&conf->wait_barrier);
		break;
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}
static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf))
			conf->barrier = 1;
		return conf;
	}
	return ERR_PTR(-EINVAL);
}
static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);