raid10.c

  1. /*
  2. * raid10.c : Multiple Devices driver for Linux
  3. *
  4. * Copyright (C) 2000-2004 Neil Brown
  5. *
  6. * RAID-10 support for md.
  7. *
  8. * Based on code in raid1.c. See raid1.c for further copyright information.
  9. *
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2, or (at your option)
  14. * any later version.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * (for example /usr/src/linux/COPYING); if not, write to the Free
  18. * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19. */
  20. #include <linux/slab.h>
  21. #include <linux/delay.h>
  22. #include <linux/blkdev.h>
  23. #include <linux/seq_file.h>
  24. #include <linux/ratelimit.h>
  25. #include "md.h"
  26. #include "raid10.h"
  27. #include "raid0.h"
  28. #include "bitmap.h"
  29. /*
  30. * RAID10 provides a combination of RAID0 and RAID1 functionality.
  31. * The layout of data is defined by
  32. * chunk_size
  33. * raid_disks
  34. * near_copies (stored in low byte of layout)
  35. * far_copies (stored in second byte of layout)
  36. * far_offset (stored in bit 16 of layout )
  37. *
  38. * The data to be stored is divided into chunks using chunksize.
  39. * Each device is divided into far_copies sections.
  40. * In each section, chunks are laid out in a style similar to raid0, but
  41. * near_copies copies of each chunk are stored (each on a different drive).
  42. * The starting device for each section is offset near_copies from the starting
  43. * device of the previous section.
  44. * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a
  45. * different drive.
  46. * near_copies and far_copies must be at least one, and their product is at most
  47. * raid_disks.
  48. *
  49. * If far_offset is true, then the far_copies are handled a bit differently.
  50. * The copies are still in different stripes, but instead of being very far apart
  51. * on disk, they are in adjacent stripes.
  52. */
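/*
 * Worked example of the layout described above (hypothetical geometry,
 * for illustration only): 4 drives, chunks A, B, C, D, ...
 *
 *   near_copies=2, far_copies=1 ("n2"):
 *     drive:    0   1   2   3
 *               A   A   B   B
 *               C   C   D   D
 *
 *   near_copies=1, far_copies=2 ("f2"):
 *     drive:    0   1   2   3
 *     section1  A   B   C   D
 *     section2  D   A   B   C   <- same data again, shifted by near_copies drives
 */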
  53. /*
  54. * Number of guaranteed r10bios in case of extreme VM load:
  55. */
  56. #define NR_RAID10_BIOS 256
  57. static void allow_barrier(conf_t *conf);
  58. static void lower_barrier(conf_t *conf);
  59. static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
  60. {
  61. conf_t *conf = data;
  62. int size = offsetof(struct r10bio_s, devs[conf->copies]);
  63. /* allocate a r10bio with room for raid_disks entries in the bios array */
  64. return kzalloc(size, gfp_flags);
  65. }
  66. static void r10bio_pool_free(void *r10_bio, void *data)
  67. {
  68. kfree(r10_bio);
  69. }
  70. /* Maximum size of each resync request */
  71. #define RESYNC_BLOCK_SIZE (64*1024)
  72. #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  73. /* amount of memory to reserve for resync requests */
  74. #define RESYNC_WINDOW (1024*1024)
  75. /* maximum number of concurrent requests, memory permitting */
  76. #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
  77. /*
  78. * When performing a resync, we need to read and compare, so
  79. * we need as many pages as there are copies.
  80. * When performing a recovery, we need 2 bios, one for read,
  81. * one for write (we recover only one drive per r10buf)
  82. *
  83. */
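/*
 * Sizing, assuming 4 KiB pages (PAGE_SIZE is architecture dependent):
 * RESYNC_PAGES = 64 KiB / 4 KiB = 16 pages per bio, and
 * RESYNC_DEPTH = 32 MiB / 64 KiB = 512 concurrent resync requests at most.
 * A resync r10buf therefore carries conf->copies bios of 16 pages each,
 * while a recovery r10buf carries only 2 (read + write, sharing pages).
 */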
  84. static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
  85. {
  86. conf_t *conf = data;
  87. struct page *page;
  88. r10bio_t *r10_bio;
  89. struct bio *bio;
  90. int i, j;
  91. int nalloc;
  92. r10_bio = r10bio_pool_alloc(gfp_flags, conf);
  93. if (!r10_bio)
  94. return NULL;
  95. if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
  96. nalloc = conf->copies; /* resync */
  97. else
  98. nalloc = 2; /* recovery */
  99. /*
  100. * Allocate bios.
  101. */
  102. for (j = nalloc ; j-- ; ) {
  103. bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
  104. if (!bio)
  105. goto out_free_bio;
  106. r10_bio->devs[j].bio = bio;
  107. }
  108. /*
  109. * Allocate RESYNC_PAGES data pages and attach them
  110. * where needed.
  111. */
  112. for (j = 0 ; j < nalloc; j++) {
  113. bio = r10_bio->devs[j].bio;
  114. for (i = 0; i < RESYNC_PAGES; i++) {
  115. if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
  116. &conf->mddev->recovery)) {
  117. /* we can share bv_page's during recovery */
  118. struct bio *rbio = r10_bio->devs[0].bio;
  119. page = rbio->bi_io_vec[i].bv_page;
  120. get_page(page);
  121. } else
  122. page = alloc_page(gfp_flags);
  123. if (unlikely(!page))
  124. goto out_free_pages;
  125. bio->bi_io_vec[i].bv_page = page;
  126. }
  127. }
  128. return r10_bio;
  129. out_free_pages:
  130. for ( ; i > 0 ; i--)
  131. safe_put_page(bio->bi_io_vec[i-1].bv_page);
  132. while (j--)
  133. for (i = 0; i < RESYNC_PAGES ; i++)
  134. safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
  135. j = -1;
  136. out_free_bio:
  137. while ( ++j < nalloc )
  138. bio_put(r10_bio->devs[j].bio);
  139. r10bio_pool_free(r10_bio, conf);
  140. return NULL;
  141. }
  142. static void r10buf_pool_free(void *__r10_bio, void *data)
  143. {
  144. int i;
  145. conf_t *conf = data;
  146. r10bio_t *r10bio = __r10_bio;
  147. int j;
  148. for (j=0; j < conf->copies; j++) {
  149. struct bio *bio = r10bio->devs[j].bio;
  150. if (bio) {
  151. for (i = 0; i < RESYNC_PAGES; i++) {
  152. safe_put_page(bio->bi_io_vec[i].bv_page);
  153. bio->bi_io_vec[i].bv_page = NULL;
  154. }
  155. bio_put(bio);
  156. }
  157. }
  158. r10bio_pool_free(r10bio, conf);
  159. }
  160. static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
  161. {
  162. int i;
  163. for (i = 0; i < conf->copies; i++) {
  164. struct bio **bio = & r10_bio->devs[i].bio;
  165. if (*bio && *bio != IO_BLOCKED)
  166. bio_put(*bio);
  167. *bio = NULL;
  168. }
  169. }
  170. static void free_r10bio(r10bio_t *r10_bio)
  171. {
  172. conf_t *conf = r10_bio->mddev->private;
  173. put_all_bios(conf, r10_bio);
  174. mempool_free(r10_bio, conf->r10bio_pool);
  175. }
  176. static void put_buf(r10bio_t *r10_bio)
  177. {
  178. conf_t *conf = r10_bio->mddev->private;
  179. mempool_free(r10_bio, conf->r10buf_pool);
  180. lower_barrier(conf);
  181. }
  182. static void reschedule_retry(r10bio_t *r10_bio)
  183. {
  184. unsigned long flags;
  185. mddev_t *mddev = r10_bio->mddev;
  186. conf_t *conf = mddev->private;
  187. spin_lock_irqsave(&conf->device_lock, flags);
  188. list_add(&r10_bio->retry_list, &conf->retry_list);
  189. conf->nr_queued ++;
  190. spin_unlock_irqrestore(&conf->device_lock, flags);
  191. /* wake up frozen array... */
  192. wake_up(&conf->wait_barrier);
  193. md_wakeup_thread(mddev->thread);
  194. }
  195. /*
  196. * raid_end_bio_io() is called when we have finished servicing a mirrored
  197. * operation and are ready to return a success/failure code to the buffer
  198. * cache layer.
  199. */
  200. static void raid_end_bio_io(r10bio_t *r10_bio)
  201. {
  202. struct bio *bio = r10_bio->master_bio;
  203. int done;
  204. conf_t *conf = r10_bio->mddev->private;
  205. if (bio->bi_phys_segments) {
  206. unsigned long flags;
  207. spin_lock_irqsave(&conf->device_lock, flags);
  208. bio->bi_phys_segments--;
  209. done = (bio->bi_phys_segments == 0);
  210. spin_unlock_irqrestore(&conf->device_lock, flags);
  211. } else
  212. done = 1;
  213. if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
  214. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  215. if (done) {
  216. bio_endio(bio, 0);
  217. /*
  218. * Wake up any possible resync thread that waits for the device
  219. * to go idle.
  220. */
  221. allow_barrier(conf);
  222. }
  223. free_r10bio(r10_bio);
  224. }
  225. /*
  226. * Update disk head position estimator based on IRQ completion info.
  227. */
  228. static inline void update_head_pos(int slot, r10bio_t *r10_bio)
  229. {
  230. conf_t *conf = r10_bio->mddev->private;
  231. conf->mirrors[r10_bio->devs[slot].devnum].head_position =
  232. r10_bio->devs[slot].addr + (r10_bio->sectors);
  233. }
  234. /*
  235. * Find the disk number which triggered the given bio
  236. */
  237. static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, struct bio *bio)
  238. {
  239. int slot;
  240. for (slot = 0; slot < conf->copies; slot++)
  241. if (r10_bio->devs[slot].bio == bio)
  242. break;
  243. BUG_ON(slot == conf->copies);
  244. update_head_pos(slot, r10_bio);
  245. return r10_bio->devs[slot].devnum;
  246. }
  247. static void raid10_end_read_request(struct bio *bio, int error)
  248. {
  249. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  250. r10bio_t *r10_bio = bio->bi_private;
  251. int slot, dev;
  252. conf_t *conf = r10_bio->mddev->private;
  253. slot = r10_bio->read_slot;
  254. dev = r10_bio->devs[slot].devnum;
  255. /*
  256. * this branch is our 'one mirror IO has finished' event handler:
  257. */
  258. update_head_pos(slot, r10_bio);
  259. if (uptodate) {
  260. /*
  261. * Set R10BIO_Uptodate in our master bio, so that
  262. * we will return a good error code to the higher
  263. * levels even if IO on some other mirrored buffer fails.
  264. *
  265. * The 'master' represents the composite IO operation to
  266. * user-side. So if something waits for IO, then it will
  267. * wait for the 'master' bio.
  268. */
  269. set_bit(R10BIO_Uptodate, &r10_bio->state);
  270. raid_end_bio_io(r10_bio);
  271. rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
  272. } else {
  273. /*
  274. * oops, read error - keep the refcount on the rdev
  275. */
  276. char b[BDEVNAME_SIZE];
  277. printk_ratelimited(KERN_ERR
  278. "md/raid10:%s: %s: rescheduling sector %llu\n",
  279. mdname(conf->mddev),
  280. bdevname(conf->mirrors[dev].rdev->bdev, b),
  281. (unsigned long long)r10_bio->sector);
  282. set_bit(R10BIO_ReadError, &r10_bio->state);
  283. reschedule_retry(r10_bio);
  284. }
  285. }
  286. static void raid10_end_write_request(struct bio *bio, int error)
  287. {
  288. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  289. r10bio_t *r10_bio = bio->bi_private;
  290. int dev;
  291. conf_t *conf = r10_bio->mddev->private;
  292. dev = find_bio_disk(conf, r10_bio, bio);
  293. /*
  294. * this branch is our 'one mirror IO has finished' event handler:
  295. */
  296. if (!uptodate) {
  297. md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
  298. /* an I/O failed, we can't clear the bitmap */
  299. set_bit(R10BIO_Degraded, &r10_bio->state);
  300. } else
  301. /*
  302. * Set R10BIO_Uptodate in our master bio, so that
  303. * we will return a good error code to the higher
  304. * levels even if IO on some other mirrored buffer fails.
  305. *
  306. * The 'master' represents the composite IO operation to
  307. * user-side. So if something waits for IO, then it will
  308. * wait for the 'master' bio.
  309. */
  310. set_bit(R10BIO_Uptodate, &r10_bio->state);
  311. /*
  312. *
  313. * Let's see if all mirrored write operations have finished
  314. * already.
  315. */
  316. if (atomic_dec_and_test(&r10_bio->remaining)) {
  317. /* clear the bitmap if all writes complete successfully */
  318. bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
  319. r10_bio->sectors,
  320. !test_bit(R10BIO_Degraded, &r10_bio->state),
  321. 0);
  322. md_write_end(r10_bio->mddev);
  323. raid_end_bio_io(r10_bio);
  324. }
  325. rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
  326. }
  327. /*
  328. * RAID10 layout manager
  329. * As well as the chunksize and raid_disks count, there are two
  330. * parameters: near_copies and far_copies.
  331. * near_copies * far_copies must be <= raid_disks.
  332. * Normally one of these will be 1.
  333. * If both are 1, we get raid0.
  334. * If near_copies == raid_disks, we get raid1.
  335. *
  336. * Chunks are laid out in raid0 style with near_copies copies of the
  337. * first chunk, followed by near_copies copies of the next chunk and
  338. * so on.
  339. * If far_copies > 1, then after 1/far_copies of the array has been assigned
  340. * as described above, we start again with a device offset of near_copies.
  341. * So we effectively have another copy of the whole array further down all
  342. * the drives, but with blocks on different drives.
  343. * With this layout, a block is never stored twice on the same device.
  344. *
  345. * raid10_find_phys finds the sector offset of a given virtual sector
  346. * on each device that it is on.
  347. *
  348. * raid10_find_virt does the reverse mapping, from a device and a
  349. * sector offset to a virtual address
  350. */
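/*
 * Worked mapping example (hypothetical: 64 KiB chunks = 128 sectors,
 * raid_disks=4, near_copies=2, far_copies=1, far_offset=0):
 * virtual sector 1000 -> chunk 7, offset 104 within the chunk;
 * chunk*near_copies = 14, so dev = 14 % 4 = 2 and stripe = 14 / 4 = 3;
 * device sector = (3 << 7) + 104 = 488.
 * raid10_find_phys() thus yields copies on (disk 2, sector 488) and
 * (disk 3, sector 488); raid10_find_virt(488, 2) maps back to sector 1000.
 */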
  351. static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
  352. {
  353. int n,f;
  354. sector_t sector;
  355. sector_t chunk;
  356. sector_t stripe;
  357. int dev;
  358. int slot = 0;
  359. /* now calculate first sector/dev */
  360. chunk = r10bio->sector >> conf->chunk_shift;
  361. sector = r10bio->sector & conf->chunk_mask;
  362. chunk *= conf->near_copies;
  363. stripe = chunk;
  364. dev = sector_div(stripe, conf->raid_disks);
  365. if (conf->far_offset)
  366. stripe *= conf->far_copies;
  367. sector += stripe << conf->chunk_shift;
  368. /* and calculate all the others */
  369. for (n=0; n < conf->near_copies; n++) {
  370. int d = dev;
  371. sector_t s = sector;
  372. r10bio->devs[slot].addr = sector;
  373. r10bio->devs[slot].devnum = d;
  374. slot++;
  375. for (f = 1; f < conf->far_copies; f++) {
  376. d += conf->near_copies;
  377. if (d >= conf->raid_disks)
  378. d -= conf->raid_disks;
  379. s += conf->stride;
  380. r10bio->devs[slot].devnum = d;
  381. r10bio->devs[slot].addr = s;
  382. slot++;
  383. }
  384. dev++;
  385. if (dev >= conf->raid_disks) {
  386. dev = 0;
  387. sector += (conf->chunk_mask + 1);
  388. }
  389. }
  390. BUG_ON(slot != conf->copies);
  391. }
  392. static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
  393. {
  394. sector_t offset, chunk, vchunk;
  395. offset = sector & conf->chunk_mask;
  396. if (conf->far_offset) {
  397. int fc;
  398. chunk = sector >> conf->chunk_shift;
  399. fc = sector_div(chunk, conf->far_copies);
  400. dev -= fc * conf->near_copies;
  401. if (dev < 0)
  402. dev += conf->raid_disks;
  403. } else {
  404. while (sector >= conf->stride) {
  405. sector -= conf->stride;
  406. if (dev < conf->near_copies)
  407. dev += conf->raid_disks - conf->near_copies;
  408. else
  409. dev -= conf->near_copies;
  410. }
  411. chunk = sector >> conf->chunk_shift;
  412. }
  413. vchunk = chunk * conf->raid_disks + dev;
  414. sector_div(vchunk, conf->near_copies);
  415. return (vchunk << conf->chunk_shift) + offset;
  416. }
  417. /**
  418. * raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
  419. * @q: request queue
  420. * @bvm: properties of new bio
  421. * @biovec: the request that could be merged to it.
  422. *
  423. * Return amount of bytes we can accept at this offset
  424. * If near_copies == raid_disks, there are no striping issues,
  425. * but in that case, the function isn't called at all.
  426. */
  427. static int raid10_mergeable_bvec(struct request_queue *q,
  428. struct bvec_merge_data *bvm,
  429. struct bio_vec *biovec)
  430. {
  431. mddev_t *mddev = q->queuedata;
  432. sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
  433. int max;
  434. unsigned int chunk_sectors = mddev->chunk_sectors;
  435. unsigned int bio_sectors = bvm->bi_size >> 9;
  436. max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
  437. if (max < 0) max = 0; /* bio_add cannot handle a negative return */
  438. if (max <= biovec->bv_len && bio_sectors == 0)
  439. return biovec->bv_len;
  440. else
  441. return max;
  442. }
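/*
 * Example of the computation above (hypothetical numbers): with 64 KiB
 * chunks, chunk_sectors = 128. A bio starting at sector 1000 sits at
 * offset 1000 & 127 = 104 inside its chunk; if it already holds 8 sectors,
 * max = (128 - (104 + 8)) << 9 = 8192 bytes may still be added before the
 * chunk boundary. A negative result is clamped to 0.
 */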
  443. /*
  444. * This routine returns the disk from which the requested read should
  445. * be done. There is a per-array 'next expected sequential IO' sector
  446. * number - if this matches on the next IO then we use the last disk.
  447. * There is also a per-disk 'last known head position' sector that is
  448. * maintained from IRQ contexts, both the normal and the resync IO
  449. * completion handlers update this position correctly. If there is no
  450. * perfect sequential match then we pick the disk whose head is closest.
  451. *
  452. * If there are 2 mirrors in the same 2 devices, performance degrades
  453. * because position is mirror, not device based.
  454. *
  455. * The rdev for the device selected will have nr_pending incremented.
  456. */
  457. /*
  458. * FIXME: possibly should rethink readbalancing and do it differently
  459. * depending on near_copies / far_copies geometry.
  460. */
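/*
 * Selection policy in brief, using a hypothetical near_copies=2 array with
 * no resync in progress: an idle mirror (nr_pending == 0) holding the
 * sector is taken immediately; otherwise the mirror whose head_position is
 * closest to the target sector wins. For far_copies > 1 the copy with the
 * lowest device address is preferred instead.
 */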
  461. static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
  462. {
  463. const sector_t this_sector = r10_bio->sector;
  464. int disk, slot;
  465. int sectors = r10_bio->sectors;
  466. int best_good_sectors;
  467. sector_t new_distance, best_dist;
  468. mdk_rdev_t *rdev;
  469. int do_balance;
  470. int best_slot;
  471. raid10_find_phys(conf, r10_bio);
  472. rcu_read_lock();
  473. retry:
  474. sectors = r10_bio->sectors;
  475. best_slot = -1;
  476. best_dist = MaxSector;
  477. best_good_sectors = 0;
  478. do_balance = 1;
  479. /*
  480. * Check if we can balance. We can balance on the whole
  481. * device if no resync is going on (recovery is ok), or below
  482. * the resync window. We take the first readable disk when
  483. * above the resync window.
  484. */
  485. if (conf->mddev->recovery_cp < MaxSector
  486. && (this_sector + sectors >= conf->next_resync))
  487. do_balance = 0;
  488. for (slot = 0; slot < conf->copies ; slot++) {
  489. sector_t first_bad;
  490. int bad_sectors;
  491. sector_t dev_sector;
  492. if (r10_bio->devs[slot].bio == IO_BLOCKED)
  493. continue;
  494. disk = r10_bio->devs[slot].devnum;
  495. rdev = rcu_dereference(conf->mirrors[disk].rdev);
  496. if (rdev == NULL)
  497. continue;
  498. if (!test_bit(In_sync, &rdev->flags))
  499. continue;
  500. dev_sector = r10_bio->devs[slot].addr;
  501. if (is_badblock(rdev, dev_sector, sectors,
  502. &first_bad, &bad_sectors)) {
  503. if (best_dist < MaxSector)
  504. /* Already have a better slot */
  505. continue;
  506. if (first_bad <= dev_sector) {
  507. /* Cannot read here. If this is the
  508. * 'primary' device, then we must not read
  509. * beyond 'bad_sectors' from another device.
  510. */
  511. bad_sectors -= (dev_sector - first_bad);
  512. if (!do_balance && sectors > bad_sectors)
  513. sectors = bad_sectors;
  514. if (best_good_sectors > sectors)
  515. best_good_sectors = sectors;
  516. } else {
  517. sector_t good_sectors =
  518. first_bad - dev_sector;
  519. if (good_sectors > best_good_sectors) {
  520. best_good_sectors = good_sectors;
  521. best_slot = slot;
  522. }
  523. if (!do_balance)
  524. /* Must read from here */
  525. break;
  526. }
  527. continue;
  528. } else
  529. best_good_sectors = sectors;
  530. if (!do_balance)
  531. break;
  532. /* This optimisation is debatable, and completely destroys
  533. * sequential read speed for 'far copies' arrays. So only
  534. * keep it for 'near' arrays, and review those later.
  535. */
  536. if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
  537. break;
  538. /* for far > 1 always use the lowest address */
  539. if (conf->far_copies > 1)
  540. new_distance = r10_bio->devs[slot].addr;
  541. else
  542. new_distance = abs(r10_bio->devs[slot].addr -
  543. conf->mirrors[disk].head_position);
  544. if (new_distance < best_dist) {
  545. best_dist = new_distance;
  546. best_slot = slot;
  547. }
  548. }
  549. if (slot == conf->copies)
  550. slot = best_slot;
  551. if (slot >= 0) {
  552. disk = r10_bio->devs[slot].devnum;
  553. rdev = rcu_dereference(conf->mirrors[disk].rdev);
  554. if (!rdev)
  555. goto retry;
  556. atomic_inc(&rdev->nr_pending);
  557. if (test_bit(Faulty, &rdev->flags)) {
  558. /* Cannot risk returning a device that failed
  559. * before we inc'ed nr_pending
  560. */
  561. rdev_dec_pending(rdev, conf->mddev);
  562. goto retry;
  563. }
  564. r10_bio->read_slot = slot;
  565. } else
  566. disk = -1;
  567. rcu_read_unlock();
  568. *max_sectors = best_good_sectors;
  569. return disk;
  570. }
  571. static int raid10_congested(void *data, int bits)
  572. {
  573. mddev_t *mddev = data;
  574. conf_t *conf = mddev->private;
  575. int i, ret = 0;
  576. if (mddev_congested(mddev, bits))
  577. return 1;
  578. rcu_read_lock();
  579. for (i = 0; i < conf->raid_disks && ret == 0; i++) {
  580. mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
  581. if (rdev && !test_bit(Faulty, &rdev->flags)) {
  582. struct request_queue *q = bdev_get_queue(rdev->bdev);
  583. ret |= bdi_congested(&q->backing_dev_info, bits);
  584. }
  585. }
  586. rcu_read_unlock();
  587. return ret;
  588. }
  589. static void flush_pending_writes(conf_t *conf)
  590. {
  591. /* Any writes that have been queued but are awaiting
  592. * bitmap updates get flushed here.
  593. */
  594. spin_lock_irq(&conf->device_lock);
  595. if (conf->pending_bio_list.head) {
  596. struct bio *bio;
  597. bio = bio_list_get(&conf->pending_bio_list);
  598. spin_unlock_irq(&conf->device_lock);
  599. /* flush any pending bitmap writes to disk
  600. * before proceeding w/ I/O */
  601. bitmap_unplug(conf->mddev->bitmap);
  602. while (bio) { /* submit pending writes */
  603. struct bio *next = bio->bi_next;
  604. bio->bi_next = NULL;
  605. generic_make_request(bio);
  606. bio = next;
  607. }
  608. } else
  609. spin_unlock_irq(&conf->device_lock);
  610. }
  611. /* Barriers....
  612. * Sometimes we need to suspend IO while we do something else,
  613. * either some resync/recovery, or reconfigure the array.
  614. * To do this we raise a 'barrier'.
  615. * The 'barrier' is a counter that can be raised multiple times
  616. * to count how many activities are happening which preclude
  617. * normal IO.
  618. * We can only raise the barrier if there is no pending IO.
  619. * i.e. if nr_pending == 0.
  620. * We choose only to raise the barrier if no-one is waiting for the
  621. * barrier to go down. This means that as soon as an IO request
  622. * is ready, no other operations which require a barrier will start
  623. * until the IO request has had a chance.
  624. *
  625. * So: regular IO calls 'wait_barrier'. When that returns there
  626. * is no background IO happening. It must arrange to call
  627. * allow_barrier when it has finished its IO.
  628. * background IO calls must call raise_barrier. Once that returns
  629. * there is no normal IO happening. It must arrange to call
  630. * lower_barrier when the particular background IO completes.
  631. */
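/*
 * Concrete illustration: with three regular writes in flight
 * (nr_pending == 3), a resync calling raise_barrier() first bumps
 * ->barrier, which makes any new wait_barrier() caller sleep, and then
 * waits for nr_pending to drain to 0. When the resync pass ends,
 * lower_barrier() decrements ->barrier and wakes the sleepers.
 */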
  632. static void raise_barrier(conf_t *conf, int force)
  633. {
  634. BUG_ON(force && !conf->barrier);
  635. spin_lock_irq(&conf->resync_lock);
  636. /* Wait until no block IO is waiting (unless 'force') */
  637. wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
  638. conf->resync_lock, );
  639. /* block any new IO from starting */
  640. conf->barrier++;
  641. /* Now wait for all pending IO to complete */
  642. wait_event_lock_irq(conf->wait_barrier,
  643. !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
  644. conf->resync_lock, );
  645. spin_unlock_irq(&conf->resync_lock);
  646. }
  647. static void lower_barrier(conf_t *conf)
  648. {
  649. unsigned long flags;
  650. spin_lock_irqsave(&conf->resync_lock, flags);
  651. conf->barrier--;
  652. spin_unlock_irqrestore(&conf->resync_lock, flags);
  653. wake_up(&conf->wait_barrier);
  654. }
  655. static void wait_barrier(conf_t *conf)
  656. {
  657. spin_lock_irq(&conf->resync_lock);
  658. if (conf->barrier) {
  659. conf->nr_waiting++;
  660. wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
  661. conf->resync_lock,
  662. );
  663. conf->nr_waiting--;
  664. }
  665. conf->nr_pending++;
  666. spin_unlock_irq(&conf->resync_lock);
  667. }
  668. static void allow_barrier(conf_t *conf)
  669. {
  670. unsigned long flags;
  671. spin_lock_irqsave(&conf->resync_lock, flags);
  672. conf->nr_pending--;
  673. spin_unlock_irqrestore(&conf->resync_lock, flags);
  674. wake_up(&conf->wait_barrier);
  675. }
  676. static void freeze_array(conf_t *conf)
  677. {
  678. /* stop syncio and normal IO and wait for everything to
  679. * go quiet.
  680. * We increment barrier and nr_waiting, and then
  681. * wait until nr_pending matches nr_queued+1
  682. * This is called in the context of one normal IO request
  683. * that has failed. Thus any sync request that might be pending
  684. * will be blocked by nr_pending, and we need to wait for
  685. * pending IO requests to complete or be queued for re-try.
  686. * Thus the number queued (nr_queued) plus this request (1)
  687. * must match the number of pending IOs (nr_pending) before
  688. * we continue.
  689. */
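/* For instance, with nr_pending == 3 (this failed request plus two still
 * in flight) the wait below ends either when the other two complete
 * (nr_pending drops to 1 == nr_queued(0) + 1) or when they also fail and
 * are queued for retry (nr_queued becomes 2, so 3 == 2 + 1). Pending
 * writes are flushed while waiting so they cannot stall the freeze.
 */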
  690. spin_lock_irq(&conf->resync_lock);
  691. conf->barrier++;
  692. conf->nr_waiting++;
  693. wait_event_lock_irq(conf->wait_barrier,
  694. conf->nr_pending == conf->nr_queued+1,
  695. conf->resync_lock,
  696. flush_pending_writes(conf));
  697. spin_unlock_irq(&conf->resync_lock);
  698. }
  699. static void unfreeze_array(conf_t *conf)
  700. {
  701. /* reverse the effect of the freeze */
  702. spin_lock_irq(&conf->resync_lock);
  703. conf->barrier--;
  704. conf->nr_waiting--;
  705. wake_up(&conf->wait_barrier);
  706. spin_unlock_irq(&conf->resync_lock);
  707. }
  708. static int make_request(mddev_t *mddev, struct bio * bio)
  709. {
  710. conf_t *conf = mddev->private;
  711. mirror_info_t *mirror;
  712. r10bio_t *r10_bio;
  713. struct bio *read_bio;
  714. int i;
  715. int chunk_sects = conf->chunk_mask + 1;
  716. const int rw = bio_data_dir(bio);
  717. const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
  718. const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
  719. unsigned long flags;
  720. mdk_rdev_t *blocked_rdev;
  721. int plugged;
  722. if (unlikely(bio->bi_rw & REQ_FLUSH)) {
  723. md_flush_request(mddev, bio);
  724. return 0;
  725. }
  726. /* If this request crosses a chunk boundary, we need to
  727. * split it. This will only happen for 1 PAGE (or less) requests.
  728. */
  729. if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
  730. > chunk_sects &&
  731. conf->near_copies < conf->raid_disks)) {
  732. struct bio_pair *bp;
  733. /* Sanity check -- queue functions should prevent this happening */
  734. if (bio->bi_vcnt != 1 ||
  735. bio->bi_idx != 0)
  736. goto bad_map;
  737. /* This is a one page bio that upper layers
  738. * refuse to split for us, so we need to split it.
  739. */
  740. bp = bio_split(bio,
  741. chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
  742. /* Each of these 'make_request' calls will call 'wait_barrier'.
  743. * If the first succeeds but the second blocks due to the resync
  744. * thread raising the barrier, we will deadlock because the
  745. * IO to the underlying device will be queued in generic_make_request
  746. * and will never complete, so will never reduce nr_pending.
  747. * So increment nr_waiting here so no new raise_barriers will
  748. * succeed, and so the second wait_barrier cannot block.
  749. */
  750. spin_lock_irq(&conf->resync_lock);
  751. conf->nr_waiting++;
  752. spin_unlock_irq(&conf->resync_lock);
  753. if (make_request(mddev, &bp->bio1))
  754. generic_make_request(&bp->bio1);
  755. if (make_request(mddev, &bp->bio2))
  756. generic_make_request(&bp->bio2);
  757. spin_lock_irq(&conf->resync_lock);
  758. conf->nr_waiting--;
  759. wake_up(&conf->wait_barrier);
  760. spin_unlock_irq(&conf->resync_lock);
  761. bio_pair_release(bp);
  762. return 0;
  763. bad_map:
  764. printk("md/raid10:%s: make_request bug: can't convert block across chunks"
  765. " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
  766. (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
  767. bio_io_error(bio);
  768. return 0;
  769. }
  770. md_write_start(mddev, bio);
  771. /*
  772. * Register the new request and wait if the reconstruction
  773. * thread has put up a bar for new requests.
  774. * Continue immediately if no resync is active currently.
  775. */
  776. wait_barrier(conf);
  777. r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
  778. r10_bio->master_bio = bio;
  779. r10_bio->sectors = bio->bi_size >> 9;
  780. r10_bio->mddev = mddev;
  781. r10_bio->sector = bio->bi_sector;
  782. r10_bio->state = 0;
  783. /* We might need to issue multiple reads to different
  784. * devices if there are bad blocks around, so we keep
  785. * track of the number of reads in bio->bi_phys_segments.
  786. * If this is 0, there is only one r10_bio and no locking
  787. * will be needed when the request completes. If it is
  788. * non-zero, then it is the number of not-completed requests.
  789. */
  790. bio->bi_phys_segments = 0;
  791. clear_bit(BIO_SEG_VALID, &bio->bi_flags);
  792. if (rw == READ) {
  793. /*
  794. * read balancing logic:
  795. */
  796. int max_sectors;
  797. int disk;
  798. int slot;
  799. read_again:
  800. disk = read_balance(conf, r10_bio, &max_sectors);
  801. slot = r10_bio->read_slot;
  802. if (disk < 0) {
  803. raid_end_bio_io(r10_bio);
  804. return 0;
  805. }
  806. mirror = conf->mirrors + disk;
  807. read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  808. md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
  809. max_sectors);
  810. r10_bio->devs[slot].bio = read_bio;
  811. read_bio->bi_sector = r10_bio->devs[slot].addr +
  812. mirror->rdev->data_offset;
  813. read_bio->bi_bdev = mirror->rdev->bdev;
  814. read_bio->bi_end_io = raid10_end_read_request;
  815. read_bio->bi_rw = READ | do_sync;
  816. read_bio->bi_private = r10_bio;
  817. if (max_sectors < r10_bio->sectors) {
  818. /* Could not read all from this device, so we will
  819. * need another r10_bio.
  820. */
  821. int sectors_handled;
  822. sectors_handled = (r10_bio->sectors + max_sectors
  823. - bio->bi_sector);
  824. r10_bio->sectors = max_sectors;
  825. spin_lock_irq(&conf->device_lock);
  826. if (bio->bi_phys_segments == 0)
  827. bio->bi_phys_segments = 2;
  828. else
  829. bio->bi_phys_segments++;
  830. spin_unlock(&conf->device_lock);
  831. /* Cannot call generic_make_request directly
  832. * as that will be queued in __generic_make_request
  833. * and subsequent mempool_alloc might block
  834. * waiting for it, so hand bio over to raid10d.
  835. */
  836. reschedule_retry(r10_bio);
  837. r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
  838. r10_bio->master_bio = bio;
  839. r10_bio->sectors = ((bio->bi_size >> 9)
  840. - sectors_handled);
  841. r10_bio->state = 0;
  842. r10_bio->mddev = mddev;
  843. r10_bio->sector = bio->bi_sector + sectors_handled;
  844. goto read_again;
  845. } else
  846. generic_make_request(read_bio);
  847. return 0;
  848. }
  849. /*
  850. * WRITE:
  851. */
  852. /* first select target devices under rcu_lock and
  853. * inc refcount on their rdev. Record them by setting
  854. * bios[x] to bio
  855. */
  856. plugged = mddev_check_plugged(mddev);
  857. raid10_find_phys(conf, r10_bio);
  858. retry_write:
  859. blocked_rdev = NULL;
  860. rcu_read_lock();
  861. for (i = 0; i < conf->copies; i++) {
  862. int d = r10_bio->devs[i].devnum;
  863. mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
  864. if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  865. atomic_inc(&rdev->nr_pending);
  866. blocked_rdev = rdev;
  867. break;
  868. }
  869. if (rdev && !test_bit(Faulty, &rdev->flags)) {
  870. atomic_inc(&rdev->nr_pending);
  871. r10_bio->devs[i].bio = bio;
  872. } else {
  873. r10_bio->devs[i].bio = NULL;
  874. set_bit(R10BIO_Degraded, &r10_bio->state);
  875. }
  876. }
  877. rcu_read_unlock();
  878. if (unlikely(blocked_rdev)) {
  879. /* Have to wait for this device to get unblocked, then retry */
  880. int j;
  881. int d;
  882. for (j = 0; j < i; j++)
  883. if (r10_bio->devs[j].bio) {
  884. d = r10_bio->devs[j].devnum;
  885. rdev_dec_pending(conf->mirrors[d].rdev, mddev);
  886. }
  887. allow_barrier(conf);
  888. md_wait_for_blocked_rdev(blocked_rdev, mddev);
  889. wait_barrier(conf);
  890. goto retry_write;
  891. }
  892. atomic_set(&r10_bio->remaining, 1);
  893. bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
  894. for (i = 0; i < conf->copies; i++) {
  895. struct bio *mbio;
  896. int d = r10_bio->devs[i].devnum;
  897. if (!r10_bio->devs[i].bio)
  898. continue;
  899. mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  900. r10_bio->devs[i].bio = mbio;
  901. mbio->bi_sector = r10_bio->devs[i].addr+
  902. conf->mirrors[d].rdev->data_offset;
  903. mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
  904. mbio->bi_end_io = raid10_end_write_request;
  905. mbio->bi_rw = WRITE | do_sync | do_fua;
  906. mbio->bi_private = r10_bio;
  907. atomic_inc(&r10_bio->remaining);
  908. spin_lock_irqsave(&conf->device_lock, flags);
  909. bio_list_add(&conf->pending_bio_list, mbio);
  910. spin_unlock_irqrestore(&conf->device_lock, flags);
  911. }
  912. if (atomic_dec_and_test(&r10_bio->remaining)) {
  913. /* This matches the end of raid10_end_write_request() */
  914. bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
  915. r10_bio->sectors,
  916. !test_bit(R10BIO_Degraded, &r10_bio->state),
  917. 0);
  918. md_write_end(mddev);
  919. raid_end_bio_io(r10_bio);
  920. }
  921. /* In case raid10d snuck in to freeze_array */
  922. wake_up(&conf->wait_barrier);
  923. if (do_sync || !mddev->bitmap || !plugged)
  924. md_wakeup_thread(mddev->thread);
  925. return 0;
  926. }
  927. static void status(struct seq_file *seq, mddev_t *mddev)
  928. {
  929. conf_t *conf = mddev->private;
  930. int i;
  931. if (conf->near_copies < conf->raid_disks)
  932. seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
  933. if (conf->near_copies > 1)
  934. seq_printf(seq, " %d near-copies", conf->near_copies);
  935. if (conf->far_copies > 1) {
  936. if (conf->far_offset)
  937. seq_printf(seq, " %d offset-copies", conf->far_copies);
  938. else
  939. seq_printf(seq, " %d far-copies", conf->far_copies);
  940. }
  941. seq_printf(seq, " [%d/%d] [", conf->raid_disks,
  942. conf->raid_disks - mddev->degraded);
  943. for (i = 0; i < conf->raid_disks; i++)
  944. seq_printf(seq, "%s",
  945. conf->mirrors[i].rdev &&
  946. test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
  947. seq_printf(seq, "]");
  948. }
  949. /* check if there are enough drives for
  950. * every block to appear on at least one.
  951. * Don't consider the device numbered 'ignore'
  952. * as we might be about to remove it.
  953. */
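/*
 * Example with raid_disks=4, copies=2: the loop below inspects the windows
 * {0,1} and {2,3}. Losing disks 1 and 2 still leaves one member per
 * window, so enough() returns 1; losing both 0 and 1 empties the first
 * window and it returns 0, matching the n2 layout where every block lives
 * on one such pair of drives.
 */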
  954. static int enough(conf_t *conf, int ignore)
  955. {
  956. int first = 0;
  957. do {
  958. int n = conf->copies;
  959. int cnt = 0;
  960. while (n--) {
  961. if (conf->mirrors[first].rdev &&
  962. first != ignore)
  963. cnt++;
  964. first = (first+1) % conf->raid_disks;
  965. }
  966. if (cnt == 0)
  967. return 0;
  968. } while (first != 0);
  969. return 1;
  970. }
  971. static void error(mddev_t *mddev, mdk_rdev_t *rdev)
  972. {
  973. char b[BDEVNAME_SIZE];
  974. conf_t *conf = mddev->private;
  975. /*
  976. * If it is not operational, then we have already marked it as dead
  977. * else if it is the last working disk, ignore the error, let the
  978. * next level up know.
  979. * else mark the drive as failed
  980. */
  981. if (test_bit(In_sync, &rdev->flags)
  982. && !enough(conf, rdev->raid_disk))
  983. /*
  984. * Don't fail the drive, just return an IO error.
  985. */
  986. return;
  987. if (test_and_clear_bit(In_sync, &rdev->flags)) {
  988. unsigned long flags;
  989. spin_lock_irqsave(&conf->device_lock, flags);
  990. mddev->degraded++;
  991. spin_unlock_irqrestore(&conf->device_lock, flags);
  992. /*
  993. * if recovery is running, make sure it aborts.
  994. */
  995. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  996. }
  997. set_bit(Blocked, &rdev->flags);
  998. set_bit(Faulty, &rdev->flags);
  999. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1000. printk(KERN_ALERT
  1001. "md/raid10:%s: Disk failure on %s, disabling device.\n"
  1002. "md/raid10:%s: Operation continuing on %d devices.\n",
  1003. mdname(mddev), bdevname(rdev->bdev, b),
  1004. mdname(mddev), conf->raid_disks - mddev->degraded);
  1005. }
  1006. static void print_conf(conf_t *conf)
  1007. {
  1008. int i;
  1009. mirror_info_t *tmp;
  1010. printk(KERN_DEBUG "RAID10 conf printout:\n");
  1011. if (!conf) {
  1012. printk(KERN_DEBUG "(!conf)\n");
  1013. return;
  1014. }
  1015. printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
  1016. conf->raid_disks);
  1017. for (i = 0; i < conf->raid_disks; i++) {
  1018. char b[BDEVNAME_SIZE];
  1019. tmp = conf->mirrors + i;
  1020. if (tmp->rdev)
  1021. printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
  1022. i, !test_bit(In_sync, &tmp->rdev->flags),
  1023. !test_bit(Faulty, &tmp->rdev->flags),
  1024. bdevname(tmp->rdev->bdev,b));
  1025. }
  1026. }
  1027. static void close_sync(conf_t *conf)
  1028. {
  1029. wait_barrier(conf);
  1030. allow_barrier(conf);
  1031. mempool_destroy(conf->r10buf_pool);
  1032. conf->r10buf_pool = NULL;
  1033. }
  1034. static int raid10_spare_active(mddev_t *mddev)
  1035. {
  1036. int i;
  1037. conf_t *conf = mddev->private;
  1038. mirror_info_t *tmp;
  1039. int count = 0;
  1040. unsigned long flags;
  1041. /*
  1042. * Find all non-in_sync disks within the RAID10 configuration
  1043. * and mark them in_sync
  1044. */
  1045. for (i = 0; i < conf->raid_disks; i++) {
  1046. tmp = conf->mirrors + i;
  1047. if (tmp->rdev
  1048. && !test_bit(Faulty, &tmp->rdev->flags)
  1049. && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
  1050. count++;
  1051. sysfs_notify_dirent(tmp->rdev->sysfs_state);
  1052. }
  1053. }
  1054. spin_lock_irqsave(&conf->device_lock, flags);
  1055. mddev->degraded -= count;
  1056. spin_unlock_irqrestore(&conf->device_lock, flags);
  1057. print_conf(conf);
  1058. return count;
  1059. }
  1060. static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
  1061. {
  1062. conf_t *conf = mddev->private;
  1063. int err = -EEXIST;
  1064. int mirror;
  1065. int first = 0;
  1066. int last = conf->raid_disks - 1;
  1067. if (mddev->recovery_cp < MaxSector)
  1068. /* only hot-add to in-sync arrays, as recovery is
  1069. * very different from resync
  1070. */
  1071. return -EBUSY;
  1072. if (!enough(conf, -1))
  1073. return -EINVAL;
  1074. if (rdev->raid_disk >= 0)
  1075. first = last = rdev->raid_disk;
  1076. if (rdev->saved_raid_disk >= first &&
  1077. conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
  1078. mirror = rdev->saved_raid_disk;
  1079. else
  1080. mirror = first;
  1081. for ( ; mirror <= last ; mirror++) {
  1082. mirror_info_t *p = &conf->mirrors[mirror];
  1083. if (p->recovery_disabled == mddev->recovery_disabled)
  1084. continue;
  1085. if (p->rdev)
  1086. continue;
  1087. disk_stack_limits(mddev->gendisk, rdev->bdev,
  1088. rdev->data_offset << 9);
  1089. /* as we don't honour merge_bvec_fn, we must
  1090. * never risk violating it, so limit
  1091. * ->max_segments to one, lying within a single
  1092. * page, as a one page request is never in
  1093. * violation.
  1094. */
  1095. if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
  1096. blk_queue_max_segments(mddev->queue, 1);
  1097. blk_queue_segment_boundary(mddev->queue,
  1098. PAGE_CACHE_SIZE - 1);
  1099. }
  1100. p->head_position = 0;
  1101. rdev->raid_disk = mirror;
  1102. err = 0;
  1103. if (rdev->saved_raid_disk != mirror)
  1104. conf->fullsync = 1;
  1105. rcu_assign_pointer(p->rdev, rdev);
  1106. break;
  1107. }
  1108. md_integrity_add_rdev(rdev, mddev);
  1109. print_conf(conf);
  1110. return err;
  1111. }
  1112. static int raid10_remove_disk(mddev_t *mddev, int number)
  1113. {
  1114. conf_t *conf = mddev->private;
  1115. int err = 0;
  1116. mdk_rdev_t *rdev;
  1117. mirror_info_t *p = conf->mirrors+ number;
  1118. print_conf(conf);
  1119. rdev = p->rdev;
  1120. if (rdev) {
  1121. if (test_bit(In_sync, &rdev->flags) ||
  1122. atomic_read(&rdev->nr_pending)) {
  1123. err = -EBUSY;
  1124. goto abort;
  1125. }
  1126. /* Only remove faulty devices if recovery
  1127. * is not possible.
  1128. */
  1129. if (!test_bit(Faulty, &rdev->flags) &&
  1130. mddev->recovery_disabled != p->recovery_disabled &&
  1131. enough(conf, -1)) {
  1132. err = -EBUSY;
  1133. goto abort;
  1134. }
  1135. p->rdev = NULL;
  1136. synchronize_rcu();
  1137. if (atomic_read(&rdev->nr_pending)) {
  1138. /* lost the race, try later */
  1139. err = -EBUSY;
  1140. p->rdev = rdev;
  1141. goto abort;
  1142. }
  1143. err = md_integrity_register(mddev);
  1144. }
  1145. abort:
  1146. print_conf(conf);
  1147. return err;
  1148. }
  1149. static void end_sync_read(struct bio *bio, int error)
  1150. {
  1151. r10bio_t *r10_bio = bio->bi_private;
  1152. conf_t *conf = r10_bio->mddev->private;
  1153. int d;
  1154. d = find_bio_disk(conf, r10_bio, bio);
  1155. if (test_bit(BIO_UPTODATE, &bio->bi_flags))
  1156. set_bit(R10BIO_Uptodate, &r10_bio->state);
  1157. else {
  1158. atomic_add(r10_bio->sectors,
  1159. &conf->mirrors[d].rdev->corrected_errors);
  1160. if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
  1161. md_error(r10_bio->mddev,
  1162. conf->mirrors[d].rdev);
  1163. }
  1164. /* for reconstruct, we always reschedule after a read.
  1165. * for resync, only after all reads
  1166. */
  1167. rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
  1168. if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
  1169. atomic_dec_and_test(&r10_bio->remaining)) {
  1170. /* we have read all the blocks,
  1171. * do the comparison in process context in raid10d
  1172. */
  1173. reschedule_retry(r10_bio);
  1174. }
  1175. }
  1176. static void end_sync_write(struct bio *bio, int error)
  1177. {
  1178. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1179. r10bio_t *r10_bio = bio->bi_private;
  1180. mddev_t *mddev = r10_bio->mddev;
  1181. conf_t *conf = mddev->private;
  1182. int d;
  1183. d = find_bio_disk(conf, r10_bio, bio);
  1184. if (!uptodate)
  1185. md_error(mddev, conf->mirrors[d].rdev);
  1186. rdev_dec_pending(conf->mirrors[d].rdev, mddev);
  1187. while (atomic_dec_and_test(&r10_bio->remaining)) {
  1188. if (r10_bio->master_bio == NULL) {
  1189. /* the primary of several recovery bios */
  1190. sector_t s = r10_bio->sectors;
  1191. put_buf(r10_bio);
  1192. md_done_sync(mddev, s, 1);
  1193. break;
  1194. } else {
  1195. r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
  1196. put_buf(r10_bio);
  1197. r10_bio = r10_bio2;
  1198. }
  1199. }
  1200. }
  1201. /*
  1202. * Note: sync and recovery are handled very differently for raid10
  1203. * This code is for resync.
  1204. * For resync, we read through virtual addresses and read all blocks.
  1205. * If there is any error, we schedule a write. The lowest numbered
  1206. * drive is authoritative.
  1207. * However requests come for physical address, so we need to map.
  1208. * For every physical address there are raid_disks/copies virtual addresses,
  1209. * which is always at least one, but is not necessarily an integer.
  1210. * This means that a physical address can span multiple chunks, so we may
  1211. * have to submit multiple io requests for a single sync request.
  1212. */
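/*
 * For example, with raid_disks=5 and copies=2 each physical chunk range
 * corresponds to 5/2 = 2.5 virtual chunks, so one physical resync window
 * can straddle a virtual chunk boundary and needs more than one read.
 */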
  1213. /*
  1214. * We check if all blocks are in-sync and only write to blocks that
  1215. * aren't in sync
  1216. */
  1217. static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
  1218. {
  1219. conf_t *conf = mddev->private;
  1220. int i, first;
  1221. struct bio *tbio, *fbio;
  1222. atomic_set(&r10_bio->remaining, 1);
  1223. /* find the first device with a block */
  1224. for (i=0; i<conf->copies; i++)
  1225. if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
  1226. break;
  1227. if (i == conf->copies)
  1228. goto done;
  1229. first = i;
  1230. fbio = r10_bio->devs[i].bio;
  1231. /* now find blocks with errors */
  1232. for (i=0 ; i < conf->copies ; i++) {
  1233. int j, d;
  1234. int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
  1235. tbio = r10_bio->devs[i].bio;
  1236. if (tbio->bi_end_io != end_sync_read)
  1237. continue;
  1238. if (i == first)
  1239. continue;
  1240. if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
  1241. /* We know that the bi_io_vec layout is the same for
  1242. * both 'first' and 'i', so we just compare them.
  1243. * All vec entries are PAGE_SIZE;
  1244. */
  1245. for (j = 0; j < vcnt; j++)
  1246. if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
  1247. page_address(tbio->bi_io_vec[j].bv_page),
  1248. PAGE_SIZE))
  1249. break;
  1250. if (j == vcnt)
  1251. continue;
  1252. mddev->resync_mismatches += r10_bio->sectors;
  1253. }
  1254. if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
  1255. /* Don't fix anything. */
  1256. continue;
  1257. /* Ok, we need to write this bio
  1258. * First we need to fixup bv_offset, bv_len and
  1259. * bi_vecs, as the read request might have corrupted these
  1260. */
  1261. tbio->bi_vcnt = vcnt;
  1262. tbio->bi_size = r10_bio->sectors << 9;
  1263. tbio->bi_idx = 0;
  1264. tbio->bi_phys_segments = 0;
  1265. tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
  1266. tbio->bi_flags |= 1 << BIO_UPTODATE;
  1267. tbio->bi_next = NULL;
  1268. tbio->bi_rw = WRITE;
  1269. tbio->bi_private = r10_bio;
  1270. tbio->bi_sector = r10_bio->devs[i].addr;
  1271. for (j=0; j < vcnt ; j++) {
  1272. tbio->bi_io_vec[j].bv_offset = 0;
  1273. tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
  1274. memcpy(page_address(tbio->bi_io_vec[j].bv_page),
  1275. page_address(fbio->bi_io_vec[j].bv_page),
  1276. PAGE_SIZE);
  1277. }
  1278. tbio->bi_end_io = end_sync_write;
  1279. d = r10_bio->devs[i].devnum;
  1280. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  1281. atomic_inc(&r10_bio->remaining);
  1282. md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
  1283. tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
  1284. tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
  1285. generic_make_request(tbio);
  1286. }
  1287. done:
  1288. if (atomic_dec_and_test(&r10_bio->remaining)) {
  1289. md_done_sync(mddev, r10_bio->sectors, 1);
  1290. put_buf(r10_bio);
  1291. }
  1292. }
  1293. /*
  1294. * Now for the recovery code.
  1295. * Recovery happens across physical sectors.
  1296. * We recover all non-in_sync drives by finding the virtual address of
  1297. * each, and then choose a working drive that also has that virt address.
  1298. * There is a separate r10_bio for each non-in_sync drive.
  1299. * Only the first two slots are in use. The first for reading,
  1300. * The second for writing.
  1301. *
  1302. */
  1303. static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
  1304. {
  1305. conf_t *conf = mddev->private;
  1306. int d;
  1307. struct bio *wbio;
  1308. /*
  1309. * share the pages with the first bio
  1310. * and submit the write request
  1311. */
  1312. wbio = r10_bio->devs[1].bio;
  1313. d = r10_bio->devs[1].devnum;
  1314. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  1315. md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
  1316. if (test_bit(R10BIO_Uptodate, &r10_bio->state))
  1317. generic_make_request(wbio);
  1318. else {
  1319. printk(KERN_NOTICE
  1320. "md/raid10:%s: recovery aborted due to read error\n",
  1321. mdname(mddev));
  1322. conf->mirrors[d].recovery_disabled = mddev->recovery_disabled;
  1323. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1324. bio_endio(wbio, 0);
  1325. }
  1326. }
  1327. /*
  1328. * Used by fix_read_error() to decay the per rdev read_errors.
  1329. * We halve the read error count for every hour that has elapsed
  1330. * since the last recorded read error.
  1331. *
  1332. */
  1333. static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
  1334. {
  1335. struct timespec cur_time_mon;
  1336. unsigned long hours_since_last;
  1337. unsigned int read_errors = atomic_read(&rdev->read_errors);
  1338. ktime_get_ts(&cur_time_mon);
  1339. if (rdev->last_read_error.tv_sec == 0 &&
  1340. rdev->last_read_error.tv_nsec == 0) {
  1341. /* first time we've seen a read error */
  1342. rdev->last_read_error = cur_time_mon;
  1343. return;
  1344. }
  1345. hours_since_last = (cur_time_mon.tv_sec -
  1346. rdev->last_read_error.tv_sec) / 3600;
  1347. rdev->last_read_error = cur_time_mon;
  1348. /*
  1349. * if hours_since_last is > the number of bits in read_errors
  1350. * just set read errors to 0. We do this to avoid
  1351. * overflowing the shift of read_errors by hours_since_last.
  1352. */
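/* e.g. a count of 40 with three elapsed hours decays to 40 >> 3 == 5 */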
  1353. if (hours_since_last >= 8 * sizeof(read_errors))
  1354. atomic_set(&rdev->read_errors, 0);
  1355. else
  1356. atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
  1357. }
  1358. /*
  1359. * This is a kernel thread which:
  1360. *
  1361. * 1. Retries failed read operations on working mirrors.
* 2. Updates the raid superblock when problems are encountered.
  1363. * 3. Performs writes following reads for array synchronising.
  1364. */
  1365. static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
  1366. {
  1367. int sect = 0; /* Offset from r10_bio->sector */
  1368. int sectors = r10_bio->sectors;
mdk_rdev_t *rdev;
  1370. int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
  1371. int d = r10_bio->devs[r10_bio->read_slot].devnum;
  1372. /* still own a reference to this rdev, so it cannot
  1373. * have been cleared recently.
  1374. */
  1375. rdev = conf->mirrors[d].rdev;
  1376. if (test_bit(Faulty, &rdev->flags))
  1377. /* drive has already been failed, just ignore any
  1378. more fix_read_error() attempts */
  1379. return;
  1380. check_decay_read_errors(mddev, rdev);
  1381. atomic_inc(&rdev->read_errors);
  1382. if (atomic_read(&rdev->read_errors) > max_read_errors) {
  1383. char b[BDEVNAME_SIZE];
  1384. bdevname(rdev->bdev, b);
  1385. printk(KERN_NOTICE
  1386. "md/raid10:%s: %s: Raid device exceeded "
  1387. "read_error threshold [cur %d:max %d]\n",
  1388. mdname(mddev), b,
  1389. atomic_read(&rdev->read_errors), max_read_errors);
  1390. printk(KERN_NOTICE
  1391. "md/raid10:%s: %s: Failing raid device\n",
  1392. mdname(mddev), b);
  1393. md_error(mddev, conf->mirrors[d].rdev);
  1394. return;
  1395. }
  1396. while(sectors) {
  1397. int s = sectors;
  1398. int sl = r10_bio->read_slot;
  1399. int success = 0;
  1400. int start;
  1401. if (s > (PAGE_SIZE>>9))
  1402. s = PAGE_SIZE >> 9;
  1403. rcu_read_lock();
  1404. do {
  1405. sector_t first_bad;
  1406. int bad_sectors;
  1407. d = r10_bio->devs[sl].devnum;
  1408. rdev = rcu_dereference(conf->mirrors[d].rdev);
  1409. if (rdev &&
  1410. test_bit(In_sync, &rdev->flags) &&
  1411. is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
  1412. &first_bad, &bad_sectors) == 0) {
  1413. atomic_inc(&rdev->nr_pending);
  1414. rcu_read_unlock();
  1415. success = sync_page_io(rdev,
  1416. r10_bio->devs[sl].addr +
  1417. sect,
  1418. s<<9,
  1419. conf->tmppage, READ, false);
  1420. rdev_dec_pending(rdev, mddev);
  1421. rcu_read_lock();
  1422. if (success)
  1423. break;
  1424. }
  1425. sl++;
  1426. if (sl == conf->copies)
  1427. sl = 0;
  1428. } while (!success && sl != r10_bio->read_slot);
  1429. rcu_read_unlock();
  1430. if (!success) {
  1431. /* Cannot read from anywhere -- bye bye array */
  1432. int dn = r10_bio->devs[r10_bio->read_slot].devnum;
  1433. md_error(mddev, conf->mirrors[dn].rdev);
  1434. break;
  1435. }
  1436. start = sl;
  1437. /* write it back and re-read */
  1438. rcu_read_lock();
  1439. while (sl != r10_bio->read_slot) {
  1440. char b[BDEVNAME_SIZE];
  1441. if (sl==0)
  1442. sl = conf->copies;
  1443. sl--;
  1444. d = r10_bio->devs[sl].devnum;
  1445. rdev = rcu_dereference(conf->mirrors[d].rdev);
  1446. if (!rdev ||
  1447. !test_bit(In_sync, &rdev->flags))
  1448. continue;
  1449. atomic_inc(&rdev->nr_pending);
  1450. rcu_read_unlock();
  1451. if (sync_page_io(rdev,
  1452. r10_bio->devs[sl].addr +
  1453. sect,
  1454. s<<9, conf->tmppage, WRITE, false)
  1455. == 0) {
  1456. /* Well, this device is dead */
  1457. printk(KERN_NOTICE
  1458. "md/raid10:%s: read correction "
  1459. "write failed"
  1460. " (%d sectors at %llu on %s)\n",
  1461. mdname(mddev), s,
  1462. (unsigned long long)(
  1463. sect + rdev->data_offset),
  1464. bdevname(rdev->bdev, b));
  1465. printk(KERN_NOTICE "md/raid10:%s: %s: failing "
  1466. "drive\n",
  1467. mdname(mddev),
  1468. bdevname(rdev->bdev, b));
  1469. md_error(mddev, rdev);
  1470. }
  1471. rdev_dec_pending(rdev, mddev);
  1472. rcu_read_lock();
  1473. }
  1474. sl = start;
  1475. while (sl != r10_bio->read_slot) {
  1476. char b[BDEVNAME_SIZE];
  1477. if (sl==0)
  1478. sl = conf->copies;
  1479. sl--;
  1480. d = r10_bio->devs[sl].devnum;
  1481. rdev = rcu_dereference(conf->mirrors[d].rdev);
  1482. if (!rdev ||
  1483. !test_bit(In_sync, &rdev->flags))
  1484. continue;
  1485. atomic_inc(&rdev->nr_pending);
  1486. rcu_read_unlock();
  1487. if (sync_page_io(rdev,
  1488. r10_bio->devs[sl].addr +
  1489. sect,
  1490. s<<9, conf->tmppage,
  1491. READ, false) == 0) {
  1492. /* Well, this device is dead */
  1493. printk(KERN_NOTICE
  1494. "md/raid10:%s: unable to read back "
  1495. "corrected sectors"
  1496. " (%d sectors at %llu on %s)\n",
  1497. mdname(mddev), s,
  1498. (unsigned long long)(
  1499. sect + rdev->data_offset),
  1500. bdevname(rdev->bdev, b));
  1501. printk(KERN_NOTICE "md/raid10:%s: %s: failing "
  1502. "drive\n",
  1503. mdname(mddev),
  1504. bdevname(rdev->bdev, b));
  1505. md_error(mddev, rdev);
  1506. } else {
  1507. printk(KERN_INFO
  1508. "md/raid10:%s: read error corrected"
  1509. " (%d sectors at %llu on %s)\n",
  1510. mdname(mddev), s,
  1511. (unsigned long long)(
  1512. sect + rdev->data_offset),
  1513. bdevname(rdev->bdev, b));
  1514. atomic_add(s, &rdev->corrected_errors);
  1515. }
  1516. rdev_dec_pending(rdev, mddev);
  1517. rcu_read_lock();
  1518. }
  1519. rcu_read_unlock();
  1520. sectors -= s;
  1521. sect += s;
  1522. }
  1523. }
  1524. static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
  1525. {
  1526. int slot = r10_bio->read_slot;
  1527. int mirror = r10_bio->devs[slot].devnum;
  1528. struct bio *bio;
  1529. conf_t *conf = mddev->private;
  1530. mdk_rdev_t *rdev;
  1531. char b[BDEVNAME_SIZE];
  1532. unsigned long do_sync;
  1533. int max_sectors;
/* We got a read error. Maybe the drive is bad, or maybe just
* this block is bad and we can fix it.
* We freeze all other IO, and try reading the block from
* other devices. When we find one, we re-write the block
* and check whether that fixes the read error.
* This is all done synchronously while the array is
* frozen.
*/
  1542. if (mddev->ro == 0) {
  1543. freeze_array(conf);
  1544. fix_read_error(conf, mddev, r10_bio);
  1545. unfreeze_array(conf);
  1546. }
  1547. rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
  1548. bio = r10_bio->devs[slot].bio;
  1549. bdevname(bio->bi_bdev, b);
  1550. r10_bio->devs[slot].bio =
  1551. mddev->ro ? IO_BLOCKED : NULL;
  1552. read_more:
  1553. mirror = read_balance(conf, r10_bio, &max_sectors);
  1554. if (mirror == -1) {
  1555. printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
  1556. " read error for block %llu\n",
  1557. mdname(mddev), b,
  1558. (unsigned long long)r10_bio->sector);
  1559. raid_end_bio_io(r10_bio);
  1560. bio_put(bio);
  1561. return;
  1562. }
  1563. do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
  1564. if (bio)
  1565. bio_put(bio);
  1566. slot = r10_bio->read_slot;
  1567. rdev = conf->mirrors[mirror].rdev;
  1568. printk_ratelimited(
  1569. KERN_ERR
  1570. "md/raid10:%s: %s: redirecting"
  1571. "sector %llu to another mirror\n",
  1572. mdname(mddev),
  1573. bdevname(rdev->bdev, b),
  1574. (unsigned long long)r10_bio->sector);
  1575. bio = bio_clone_mddev(r10_bio->master_bio,
  1576. GFP_NOIO, mddev);
  1577. md_trim_bio(bio,
  1578. r10_bio->sector - bio->bi_sector,
  1579. max_sectors);
  1580. r10_bio->devs[slot].bio = bio;
  1581. bio->bi_sector = r10_bio->devs[slot].addr
  1582. + rdev->data_offset;
  1583. bio->bi_bdev = rdev->bdev;
  1584. bio->bi_rw = READ | do_sync;
  1585. bio->bi_private = r10_bio;
  1586. bio->bi_end_io = raid10_end_read_request;
  1587. if (max_sectors < r10_bio->sectors) {
  1588. /* Drat - have to split this up more */
  1589. struct bio *mbio = r10_bio->master_bio;
  1590. int sectors_handled =
  1591. r10_bio->sector + max_sectors
  1592. - mbio->bi_sector;
  1593. r10_bio->sectors = max_sectors;
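/* The master bio's bi_phys_segments is used to count the r10_bios
* that must complete before the master bio itself can be ended.
*/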
  1594. spin_lock_irq(&conf->device_lock);
  1595. if (mbio->bi_phys_segments == 0)
  1596. mbio->bi_phys_segments = 2;
  1597. else
  1598. mbio->bi_phys_segments++;
  1599. spin_unlock_irq(&conf->device_lock);
  1600. generic_make_request(bio);
  1601. bio = NULL;
  1602. r10_bio = mempool_alloc(conf->r10bio_pool,
  1603. GFP_NOIO);
  1604. r10_bio->master_bio = mbio;
  1605. r10_bio->sectors = (mbio->bi_size >> 9)
  1606. - sectors_handled;
  1607. r10_bio->state = 0;
  1608. set_bit(R10BIO_ReadError,
  1609. &r10_bio->state);
  1610. r10_bio->mddev = mddev;
  1611. r10_bio->sector = mbio->bi_sector
  1612. + sectors_handled;
  1613. goto read_more;
  1614. } else
  1615. generic_make_request(bio);
  1616. }
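/*
* raid10d is the per-array management thread.  It takes r10_bios off
* conf->retry_list and finishes them: writes after a resync read,
* writes for recovery, read-error handling, and reads that were
* deferred to a separate context.
*/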
  1617. static void raid10d(mddev_t *mddev)
  1618. {
  1619. r10bio_t *r10_bio;
  1620. unsigned long flags;
  1621. conf_t *conf = mddev->private;
  1622. struct list_head *head = &conf->retry_list;
  1623. struct blk_plug plug;
  1624. md_check_recovery(mddev);
  1625. blk_start_plug(&plug);
  1626. for (;;) {
  1627. flush_pending_writes(conf);
  1628. spin_lock_irqsave(&conf->device_lock, flags);
  1629. if (list_empty(head)) {
  1630. spin_unlock_irqrestore(&conf->device_lock, flags);
  1631. break;
  1632. }
  1633. r10_bio = list_entry(head->prev, r10bio_t, retry_list);
  1634. list_del(head->prev);
  1635. conf->nr_queued--;
  1636. spin_unlock_irqrestore(&conf->device_lock, flags);
  1637. mddev = r10_bio->mddev;
  1638. conf = mddev->private;
  1639. if (test_bit(R10BIO_IsSync, &r10_bio->state))
  1640. sync_request_write(mddev, r10_bio);
  1641. else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
  1642. recovery_request_write(mddev, r10_bio);
  1643. else if (test_bit(R10BIO_ReadError, &r10_bio->state))
  1644. handle_read_error(mddev, r10_bio);
  1645. else {
  1646. /* just a partial read to be scheduled from a
  1647. * separate context
  1648. */
  1649. int slot = r10_bio->read_slot;
  1650. generic_make_request(r10_bio->devs[slot].bio);
  1651. }
  1652. cond_resched();
  1653. if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
  1654. md_check_recovery(mddev);
  1655. }
  1656. blk_finish_plug(&plug);
  1657. }
  1658. static int init_resync(conf_t *conf)
  1659. {
  1660. int buffs;
  1661. buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
  1662. BUG_ON(conf->r10buf_pool);
  1663. conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
  1664. if (!conf->r10buf_pool)
  1665. return -ENOMEM;
  1666. conf->next_resync = 0;
  1667. return 0;
  1668. }
  1669. /*
  1670. * perform a "sync" on one "block"
  1671. *
  1672. * We need to make sure that no normal I/O request - particularly write
  1673. * requests - conflict with active sync requests.
  1674. *
  1675. * This is achieved by tracking pending requests and a 'barrier' concept
  1676. * that can be installed to exclude normal IO requests.
  1677. *
  1678. * Resync and recovery are handled very differently.
  1679. * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
  1680. *
  1681. * For resync, we iterate over virtual addresses, read all copies,
  1682. * and update if there are differences. If only one copy is live,
  1683. * skip it.
  1684. * For recovery, we iterate over physical addresses, read a good
  1685. * value for each non-in_sync drive, and over-write.
  1686. *
  1687. * So, for recovery we may have several outstanding complex requests for a
  1688. * given address, one for each out-of-sync device. We model this by allocating
  1689. * a number of r10_bio structures, one for each out-of-sync device.
  1690. * As we setup these structures, we collect all bio's together into a list
  1691. * which we then process collectively to add pages, and then process again
  1692. * to pass to generic_make_request.
  1693. *
  1694. * The r10_bio structures are linked using a borrowed master_bio pointer.
  1695. * This link is counted in ->remaining. When the r10_bio that points to NULL
  1696. * has its remaining count decremented to 0, the whole complex operation
  1697. * is complete.
  1698. *
  1699. */
  1700. static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
  1701. int *skipped, int go_faster)
  1702. {
  1703. conf_t *conf = mddev->private;
  1704. r10bio_t *r10_bio;
  1705. struct bio *biolist = NULL, *bio;
  1706. sector_t max_sector, nr_sectors;
  1707. int i;
  1708. int max_sync;
  1709. sector_t sync_blocks;
  1710. sector_t sectors_skipped = 0;
  1711. int chunks_skipped = 0;
  1712. if (!conf->r10buf_pool)
  1713. if (init_resync(conf))
  1714. return 0;
  1715. skipped:
  1716. max_sector = mddev->dev_sectors;
  1717. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
  1718. max_sector = mddev->resync_max_sectors;
  1719. if (sector_nr >= max_sector) {
/* If we aborted, we need to abort the
* sync on the 'current' bitmap chunks (there can
* be several when recovering multiple devices),
* as we may have started syncing it but not finished.
  1724. * We can find the current address in
  1725. * mddev->curr_resync, but for recovery,
  1726. * we need to convert that to several
  1727. * virtual addresses.
  1728. */
  1729. if (mddev->curr_resync < max_sector) { /* aborted */
  1730. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
  1731. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  1732. &sync_blocks, 1);
  1733. else for (i=0; i<conf->raid_disks; i++) {
  1734. sector_t sect =
  1735. raid10_find_virt(conf, mddev->curr_resync, i);
  1736. bitmap_end_sync(mddev->bitmap, sect,
  1737. &sync_blocks, 1);
  1738. }
  1739. } else /* completed sync */
  1740. conf->fullsync = 0;
  1741. bitmap_close_sync(mddev->bitmap);
  1742. close_sync(conf);
  1743. *skipped = 1;
  1744. return sectors_skipped;
  1745. }
  1746. if (chunks_skipped >= conf->raid_disks) {
  1747. /* if there has been nothing to do on any drive,
* then there is nothing to do at all.
  1749. */
  1750. *skipped = 1;
  1751. return (max_sector - sector_nr) + sectors_skipped;
  1752. }
  1753. if (max_sector > mddev->resync_max)
  1754. max_sector = mddev->resync_max; /* Don't do IO beyond here */
  1755. /* make sure whole request will fit in a chunk - if chunks
  1756. * are meaningful
  1757. */
  1758. if (conf->near_copies < conf->raid_disks &&
  1759. max_sector > (sector_nr | conf->chunk_mask))
  1760. max_sector = (sector_nr | conf->chunk_mask) + 1;
  1761. /*
  1762. * If there is non-resync activity waiting for us then
  1763. * put in a delay to throttle resync.
  1764. */
  1765. if (!go_faster && conf->nr_waiting)
  1766. msleep_interruptible(1000);
  1767. /* Again, very different code for resync and recovery.
  1768. * Both must result in an r10bio with a list of bios that
  1769. * have bi_end_io, bi_sector, bi_bdev set,
  1770. * and bi_private set to the r10bio.
  1771. * For recovery, we may actually create several r10bios
  1772. * with 2 bios in each, that correspond to the bios in the main one.
  1773. * In this case, the subordinate r10bios link back through a
  1774. * borrowed master_bio pointer, and the counter in the master
  1775. * includes a ref from each subordinate.
  1776. */
  1777. /* First, we decide what to do and set ->bi_end_io
  1778. * To end_sync_read if we want to read, and
  1779. * end_sync_write if we will want to write.
  1780. */
  1781. max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
  1782. if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  1783. /* recovery... the complicated one */
  1784. int j;
  1785. r10_bio = NULL;
  1786. for (i=0 ; i<conf->raid_disks; i++) {
  1787. int still_degraded;
  1788. r10bio_t *rb2;
  1789. sector_t sect;
  1790. int must_sync;
  1791. int any_working;
  1792. if (conf->mirrors[i].rdev == NULL ||
  1793. test_bit(In_sync, &conf->mirrors[i].rdev->flags))
  1794. continue;
  1795. still_degraded = 0;
  1796. /* want to reconstruct this device */
  1797. rb2 = r10_bio;
  1798. sect = raid10_find_virt(conf, sector_nr, i);
  1799. /* Unless we are doing a full sync, we only need
  1800. * to recover the block if it is set in the bitmap
  1801. */
  1802. must_sync = bitmap_start_sync(mddev->bitmap, sect,
  1803. &sync_blocks, 1);
  1804. if (sync_blocks < max_sync)
  1805. max_sync = sync_blocks;
  1806. if (!must_sync &&
  1807. !conf->fullsync) {
  1808. /* yep, skip the sync_blocks here, but don't assume
  1809. * that there will never be anything to do here
  1810. */
  1811. chunks_skipped = -1;
  1812. continue;
  1813. }
  1814. r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
  1815. raise_barrier(conf, rb2 != NULL);
  1816. atomic_set(&r10_bio->remaining, 0);
  1817. r10_bio->master_bio = (struct bio*)rb2;
  1818. if (rb2)
  1819. atomic_inc(&rb2->remaining);
  1820. r10_bio->mddev = mddev;
  1821. set_bit(R10BIO_IsRecover, &r10_bio->state);
  1822. r10_bio->sector = sect;
  1823. raid10_find_phys(conf, r10_bio);
  1824. /* Need to check if the array will still be
  1825. * degraded
  1826. */
  1827. for (j=0; j<conf->raid_disks; j++)
  1828. if (conf->mirrors[j].rdev == NULL ||
  1829. test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
  1830. still_degraded = 1;
  1831. break;
  1832. }
  1833. must_sync = bitmap_start_sync(mddev->bitmap, sect,
  1834. &sync_blocks, still_degraded);
  1835. any_working = 0;
  1836. for (j=0; j<conf->copies;j++) {
  1837. int k;
  1838. int d = r10_bio->devs[j].devnum;
  1839. mdk_rdev_t *rdev;
  1840. sector_t sector, first_bad;
  1841. int bad_sectors;
  1842. if (!conf->mirrors[d].rdev ||
  1843. !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
  1844. continue;
  1845. /* This is where we read from */
  1846. any_working = 1;
  1847. rdev = conf->mirrors[d].rdev;
  1848. sector = r10_bio->devs[j].addr;
  1849. if (is_badblock(rdev, sector, max_sync,
  1850. &first_bad, &bad_sectors)) {
  1851. if (first_bad > sector)
  1852. max_sync = first_bad - sector;
  1853. else {
  1854. bad_sectors -= (sector
  1855. - first_bad);
  1856. if (max_sync > bad_sectors)
  1857. max_sync = bad_sectors;
  1858. continue;
  1859. }
  1860. }
  1861. bio = r10_bio->devs[0].bio;
  1862. bio->bi_next = biolist;
  1863. biolist = bio;
  1864. bio->bi_private = r10_bio;
  1865. bio->bi_end_io = end_sync_read;
  1866. bio->bi_rw = READ;
  1867. bio->bi_sector = r10_bio->devs[j].addr +
  1868. conf->mirrors[d].rdev->data_offset;
  1869. bio->bi_bdev = conf->mirrors[d].rdev->bdev;
  1870. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  1871. atomic_inc(&r10_bio->remaining);
  1872. /* and we write to 'i' */
  1873. for (k=0; k<conf->copies; k++)
  1874. if (r10_bio->devs[k].devnum == i)
  1875. break;
  1876. BUG_ON(k == conf->copies);
  1877. bio = r10_bio->devs[1].bio;
  1878. bio->bi_next = biolist;
  1879. biolist = bio;
  1880. bio->bi_private = r10_bio;
  1881. bio->bi_end_io = end_sync_write;
  1882. bio->bi_rw = WRITE;
  1883. bio->bi_sector = r10_bio->devs[k].addr +
  1884. conf->mirrors[i].rdev->data_offset;
  1885. bio->bi_bdev = conf->mirrors[i].rdev->bdev;
  1886. r10_bio->devs[0].devnum = d;
  1887. r10_bio->devs[1].devnum = i;
  1888. break;
  1889. }
  1890. if (j == conf->copies) {
  1891. /* Cannot recover, so abort the recovery or
  1892. * record a bad block */
  1893. put_buf(r10_bio);
  1894. if (rb2)
  1895. atomic_dec(&rb2->remaining);
  1896. r10_bio = rb2;
  1897. if (any_working) {
  1898. /* problem is that there are bad blocks
  1899. * on other device(s)
  1900. */
  1901. int k;
  1902. for (k = 0; k < conf->copies; k++)
  1903. if (r10_bio->devs[k].devnum == i)
  1904. break;
  1905. if (!rdev_set_badblocks(
  1906. conf->mirrors[i].rdev,
  1907. r10_bio->devs[k].addr,
  1908. max_sync, 0))
  1909. any_working = 0;
  1910. }
  1911. if (!any_working) {
  1912. if (!test_and_set_bit(MD_RECOVERY_INTR,
  1913. &mddev->recovery))
  1914. printk(KERN_INFO "md/raid10:%s: insufficient "
  1915. "working devices for recovery.\n",
  1916. mdname(mddev));
  1917. conf->mirrors[i].recovery_disabled
  1918. = mddev->recovery_disabled;
  1919. }
  1920. break;
  1921. }
  1922. }
  1923. if (biolist == NULL) {
  1924. while (r10_bio) {
  1925. r10bio_t *rb2 = r10_bio;
  1926. r10_bio = (r10bio_t*) rb2->master_bio;
  1927. rb2->master_bio = NULL;
  1928. put_buf(rb2);
  1929. }
  1930. goto giveup;
  1931. }
  1932. } else {
  1933. /* resync. Schedule a read for every block at this virt offset */
  1934. int count = 0;
  1935. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  1936. if (!bitmap_start_sync(mddev->bitmap, sector_nr,
  1937. &sync_blocks, mddev->degraded) &&
  1938. !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
  1939. &mddev->recovery)) {
  1940. /* We can skip this block */
  1941. *skipped = 1;
  1942. return sync_blocks + sectors_skipped;
  1943. }
  1944. if (sync_blocks < max_sync)
  1945. max_sync = sync_blocks;
  1946. r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
  1947. r10_bio->mddev = mddev;
  1948. atomic_set(&r10_bio->remaining, 0);
  1949. raise_barrier(conf, 0);
  1950. conf->next_resync = sector_nr;
  1951. r10_bio->master_bio = NULL;
  1952. r10_bio->sector = sector_nr;
  1953. set_bit(R10BIO_IsSync, &r10_bio->state);
  1954. raid10_find_phys(conf, r10_bio);
  1955. r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
  1956. for (i=0; i<conf->copies; i++) {
  1957. int d = r10_bio->devs[i].devnum;
  1958. sector_t first_bad, sector;
  1959. int bad_sectors;
  1960. bio = r10_bio->devs[i].bio;
  1961. bio->bi_end_io = NULL;
  1962. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  1963. if (conf->mirrors[d].rdev == NULL ||
  1964. test_bit(Faulty, &conf->mirrors[d].rdev->flags))
  1965. continue;
  1966. sector = r10_bio->devs[i].addr;
  1967. if (is_badblock(conf->mirrors[d].rdev,
  1968. sector, max_sync,
  1969. &first_bad, &bad_sectors)) {
  1970. if (first_bad > sector)
  1971. max_sync = first_bad - sector;
  1972. else {
  1973. bad_sectors -= (sector - first_bad);
  1974. if (max_sync > bad_sectors)
max_sync = bad_sectors;
  1976. continue;
  1977. }
  1978. }
  1979. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  1980. atomic_inc(&r10_bio->remaining);
  1981. bio->bi_next = biolist;
  1982. biolist = bio;
  1983. bio->bi_private = r10_bio;
  1984. bio->bi_end_io = end_sync_read;
  1985. bio->bi_rw = READ;
  1986. bio->bi_sector = sector +
  1987. conf->mirrors[d].rdev->data_offset;
  1988. bio->bi_bdev = conf->mirrors[d].rdev->bdev;
  1989. count++;
  1990. }
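/* We need at least two readable copies at this virtual address to
* have anything to compare or repair; otherwise give up on this block.
*/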
  1991. if (count < 2) {
  1992. for (i=0; i<conf->copies; i++) {
  1993. int d = r10_bio->devs[i].devnum;
  1994. if (r10_bio->devs[i].bio->bi_end_io)
  1995. rdev_dec_pending(conf->mirrors[d].rdev,
  1996. mddev);
  1997. }
  1998. put_buf(r10_bio);
  1999. biolist = NULL;
  2000. goto giveup;
  2001. }
  2002. }
  2003. for (bio = biolist; bio ; bio=bio->bi_next) {
  2004. bio->bi_flags &= ~(BIO_POOL_MASK - 1);
  2005. if (bio->bi_end_io)
  2006. bio->bi_flags |= 1 << BIO_UPTODATE;
  2007. bio->bi_vcnt = 0;
  2008. bio->bi_idx = 0;
  2009. bio->bi_phys_segments = 0;
  2010. bio->bi_size = 0;
  2011. }
  2012. nr_sectors = 0;
  2013. if (sector_nr + max_sync < max_sector)
  2014. max_sector = sector_nr + max_sync;
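/* Attach pages to every bio in the list, one PAGE_SIZE chunk at a
* time, until the bios are full or we reach max_sector.
*/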
  2015. do {
  2016. struct page *page;
  2017. int len = PAGE_SIZE;
  2018. if (sector_nr + (len>>9) > max_sector)
  2019. len = (max_sector - sector_nr) << 9;
  2020. if (len == 0)
  2021. break;
  2022. for (bio= biolist ; bio ; bio=bio->bi_next) {
  2023. struct bio *bio2;
  2024. page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
  2025. if (bio_add_page(bio, page, len, 0))
  2026. continue;
  2027. /* stop here */
  2028. bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
  2029. for (bio2 = biolist;
  2030. bio2 && bio2 != bio;
  2031. bio2 = bio2->bi_next) {
  2032. /* remove last page from this bio */
  2033. bio2->bi_vcnt--;
  2034. bio2->bi_size -= len;
  2035. bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
  2036. }
  2037. goto bio_full;
  2038. }
  2039. nr_sectors += len>>9;
  2040. sector_nr += len>>9;
  2041. } while (biolist->bi_vcnt < RESYNC_PAGES);
  2042. bio_full:
  2043. r10_bio->sectors = nr_sectors;
  2044. while (biolist) {
  2045. bio = biolist;
  2046. biolist = biolist->bi_next;
  2047. bio->bi_next = NULL;
  2048. r10_bio = bio->bi_private;
  2049. r10_bio->sectors = nr_sectors;
  2050. if (bio->bi_end_io == end_sync_read) {
  2051. md_sync_acct(bio->bi_bdev, nr_sectors);
  2052. generic_make_request(bio);
  2053. }
  2054. }
  2055. if (sectors_skipped)
  2056. /* pretend they weren't skipped, it makes
  2057. * no important difference in this case
  2058. */
  2059. md_done_sync(mddev, sectors_skipped, 1);
  2060. return sectors_skipped + nr_sectors;
  2061. giveup:
/* There is nowhere to write, so all non-sync
* drives must be failed or in resync, or all drives
* have a bad block here, so try the next chunk...
*/
  2066. if (sector_nr + max_sync < max_sector)
  2067. max_sector = sector_nr + max_sync;
  2068. sectors_skipped += (max_sector - sector_nr);
  2069. chunks_skipped ++;
  2070. sector_nr = max_sector;
  2071. goto skipped;
  2072. }
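/*
* Report the array capacity for 'raid_disks' devices of 'sectors' each
* (0 means "use the current configuration"): the chunks on one device
* are divided among the far copies, multiplied by the number of devices
* and divided among the near copies.  e.g. 4 devices with near_copies=2
* and far_copies=1 give twice the capacity of one device.
*/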
  2073. static sector_t
  2074. raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
  2075. {
  2076. sector_t size;
  2077. conf_t *conf = mddev->private;
  2078. if (!raid_disks)
  2079. raid_disks = conf->raid_disks;
  2080. if (!sectors)
  2081. sectors = conf->dev_sectors;
  2082. size = sectors >> conf->chunk_shift;
  2083. sector_div(size, conf->far_copies);
  2084. size = size * raid_disks;
  2085. sector_div(size, conf->near_copies);
  2086. return size << conf->chunk_shift;
  2087. }
  2088. static conf_t *setup_conf(mddev_t *mddev)
  2089. {
  2090. conf_t *conf = NULL;
  2091. int nc, fc, fo;
  2092. sector_t stride, size;
  2093. int err = -EINVAL;
  2094. if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
  2095. !is_power_of_2(mddev->new_chunk_sectors)) {
  2096. printk(KERN_ERR "md/raid10:%s: chunk size must be "
  2097. "at least PAGE_SIZE(%ld) and be a power of 2.\n",
  2098. mdname(mddev), PAGE_SIZE);
  2099. goto out;
  2100. }
  2101. nc = mddev->new_layout & 255;
  2102. fc = (mddev->new_layout >> 8) & 255;
  2103. fo = mddev->new_layout & (1<<16);
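/* e.g. the common 'n2' layout (2 near copies, 1 far copy, no far
* offset) is new_layout == 0x102, giving nc == 2, fc == 1, fo == 0.
*/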
  2104. if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
  2105. (mddev->new_layout >> 17)) {
  2106. printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
  2107. mdname(mddev), mddev->new_layout);
  2108. goto out;
  2109. }
  2110. err = -ENOMEM;
  2111. conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
  2112. if (!conf)
  2113. goto out;
  2114. conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
  2115. GFP_KERNEL);
  2116. if (!conf->mirrors)
  2117. goto out;
  2118. conf->tmppage = alloc_page(GFP_KERNEL);
  2119. if (!conf->tmppage)
  2120. goto out;
  2121. conf->raid_disks = mddev->raid_disks;
  2122. conf->near_copies = nc;
  2123. conf->far_copies = fc;
  2124. conf->copies = nc*fc;
  2125. conf->far_offset = fo;
  2126. conf->chunk_mask = mddev->new_chunk_sectors - 1;
  2127. conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
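/* e.g. 512KiB chunks are 1024 sectors: chunk_shift == 10, chunk_mask == 0x3ff */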
  2128. conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
  2129. r10bio_pool_free, conf);
  2130. if (!conf->r10bio_pool)
  2131. goto out;
  2132. size = mddev->dev_sectors >> conf->chunk_shift;
  2133. sector_div(size, fc);
  2134. size = size * conf->raid_disks;
  2135. sector_div(size, nc);
  2136. /* 'size' is now the number of chunks in the array */
  2137. /* calculate "used chunks per device" in 'stride' */
  2138. stride = size * conf->copies;
  2139. /* We need to round up when dividing by raid_disks to
  2140. * get the stride size.
  2141. */
  2142. stride += conf->raid_disks - 1;
  2143. sector_div(stride, conf->raid_disks);
  2144. conf->dev_sectors = stride << conf->chunk_shift;
  2145. if (fo)
  2146. stride = 1;
  2147. else
  2148. sector_div(stride, fc);
  2149. conf->stride = stride << conf->chunk_shift;
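/* conf->stride is the distance, in sectors, between successive far
* copies on each device (a single chunk when far_offset is set).
*/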
  2150. spin_lock_init(&conf->device_lock);
  2151. INIT_LIST_HEAD(&conf->retry_list);
  2152. spin_lock_init(&conf->resync_lock);
  2153. init_waitqueue_head(&conf->wait_barrier);
  2154. conf->thread = md_register_thread(raid10d, mddev, NULL);
  2155. if (!conf->thread)
  2156. goto out;
  2157. conf->mddev = mddev;
  2158. return conf;
  2159. out:
  2160. printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
  2161. mdname(mddev));
  2162. if (conf) {
  2163. if (conf->r10bio_pool)
  2164. mempool_destroy(conf->r10bio_pool);
  2165. kfree(conf->mirrors);
  2166. safe_put_page(conf->tmppage);
  2167. kfree(conf);
  2168. }
  2169. return ERR_PTR(err);
  2170. }
  2171. static int run(mddev_t *mddev)
  2172. {
  2173. conf_t *conf;
  2174. int i, disk_idx, chunk_size;
  2175. mirror_info_t *disk;
  2176. mdk_rdev_t *rdev;
  2177. sector_t size;
/*
* copy the already verified devices into our private RAID10
* bookkeeping area. [whatever we allocate in run()
* should be freed in stop()]
*/
  2183. if (mddev->private == NULL) {
  2184. conf = setup_conf(mddev);
  2185. if (IS_ERR(conf))
  2186. return PTR_ERR(conf);
  2187. mddev->private = conf;
  2188. }
  2189. conf = mddev->private;
  2190. if (!conf)
  2191. goto out;
  2192. mddev->thread = conf->thread;
  2193. conf->thread = NULL;
  2194. chunk_size = mddev->chunk_sectors << 9;
  2195. blk_queue_io_min(mddev->queue, chunk_size);
  2196. if (conf->raid_disks % conf->near_copies)
  2197. blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
  2198. else
  2199. blk_queue_io_opt(mddev->queue, chunk_size *
  2200. (conf->raid_disks / conf->near_copies));
  2201. list_for_each_entry(rdev, &mddev->disks, same_set) {
  2202. disk_idx = rdev->raid_disk;
  2203. if (disk_idx >= conf->raid_disks
  2204. || disk_idx < 0)
  2205. continue;
  2206. disk = conf->mirrors + disk_idx;
  2207. disk->rdev = rdev;
  2208. disk_stack_limits(mddev->gendisk, rdev->bdev,
  2209. rdev->data_offset << 9);
  2210. /* as we don't honour merge_bvec_fn, we must never risk
  2211. * violating it, so limit max_segments to 1 lying
  2212. * within a single page.
  2213. */
  2214. if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
  2215. blk_queue_max_segments(mddev->queue, 1);
  2216. blk_queue_segment_boundary(mddev->queue,
  2217. PAGE_CACHE_SIZE - 1);
  2218. }
  2219. disk->head_position = 0;
  2220. }
  2221. /* need to check that every block has at least one working mirror */
  2222. if (!enough(conf, -1)) {
  2223. printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
  2224. mdname(mddev));
  2225. goto out_free_conf;
  2226. }
  2227. mddev->degraded = 0;
  2228. for (i = 0; i < conf->raid_disks; i++) {
  2229. disk = conf->mirrors + i;
  2230. if (!disk->rdev ||
  2231. !test_bit(In_sync, &disk->rdev->flags)) {
  2232. disk->head_position = 0;
  2233. mddev->degraded++;
  2234. if (disk->rdev)
  2235. conf->fullsync = 1;
  2236. }
  2237. }
  2238. if (mddev->recovery_cp != MaxSector)
  2239. printk(KERN_NOTICE "md/raid10:%s: not clean"
  2240. " -- starting background reconstruction\n",
  2241. mdname(mddev));
  2242. printk(KERN_INFO
  2243. "md/raid10:%s: active with %d out of %d devices\n",
  2244. mdname(mddev), conf->raid_disks - mddev->degraded,
  2245. conf->raid_disks);
  2246. /*
  2247. * Ok, everything is just fine now
  2248. */
  2249. mddev->dev_sectors = conf->dev_sectors;
  2250. size = raid10_size(mddev, 0, 0);
  2251. md_set_array_sectors(mddev, size);
  2252. mddev->resync_max_sectors = size;
  2253. mddev->queue->backing_dev_info.congested_fn = raid10_congested;
  2254. mddev->queue->backing_dev_info.congested_data = mddev;
  2255. /* Calculate max read-ahead size.
  2256. * We need to readahead at least twice a whole stripe....
  2257. * maybe...
  2258. */
  2259. {
  2260. int stripe = conf->raid_disks *
  2261. ((mddev->chunk_sectors << 9) / PAGE_SIZE);
  2262. stripe /= conf->near_copies;
  2263. if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
  2264. mddev->queue->backing_dev_info.ra_pages = 2* stripe;
  2265. }
  2266. if (conf->near_copies < conf->raid_disks)
  2267. blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
  2268. if (md_integrity_register(mddev))
  2269. goto out_free_conf;
  2270. return 0;
  2271. out_free_conf:
  2272. md_unregister_thread(mddev->thread);
  2273. if (conf->r10bio_pool)
  2274. mempool_destroy(conf->r10bio_pool);
  2275. safe_put_page(conf->tmppage);
  2276. kfree(conf->mirrors);
  2277. kfree(conf);
  2278. mddev->private = NULL;
  2279. out:
  2280. return -EIO;
  2281. }
  2282. static int stop(mddev_t *mddev)
  2283. {
  2284. conf_t *conf = mddev->private;
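/* quiesce the array: wait for pending requests to drain before tearing down */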
  2285. raise_barrier(conf, 0);
  2286. lower_barrier(conf);
  2287. md_unregister_thread(mddev->thread);
  2288. mddev->thread = NULL;
  2289. blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
  2290. if (conf->r10bio_pool)
  2291. mempool_destroy(conf->r10bio_pool);
  2292. kfree(conf->mirrors);
  2293. kfree(conf);
  2294. mddev->private = NULL;
  2295. return 0;
  2296. }
  2297. static void raid10_quiesce(mddev_t *mddev, int state)
  2298. {
  2299. conf_t *conf = mddev->private;
  2300. switch(state) {
  2301. case 1:
  2302. raise_barrier(conf, 0);
  2303. break;
  2304. case 0:
  2305. lower_barrier(conf);
  2306. break;
  2307. }
  2308. }
  2309. static void *raid10_takeover_raid0(mddev_t *mddev)
  2310. {
  2311. mdk_rdev_t *rdev;
  2312. conf_t *conf;
  2313. if (mddev->degraded > 0) {
  2314. printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
  2315. mdname(mddev));
  2316. return ERR_PTR(-EINVAL);
  2317. }
  2318. /* Set new parameters */
  2319. mddev->new_level = 10;
  2320. /* new layout: far_copies = 1, near_copies = 2 */
  2321. mddev->new_layout = (1<<8) + 2;
  2322. mddev->new_chunk_sectors = mddev->chunk_sectors;
  2323. mddev->delta_disks = mddev->raid_disks;
  2324. mddev->raid_disks *= 2;
/* make sure it will not be marked as dirty */
  2326. mddev->recovery_cp = MaxSector;
  2327. conf = setup_conf(mddev);
  2328. if (!IS_ERR(conf)) {
  2329. list_for_each_entry(rdev, &mddev->disks, same_set)
  2330. if (rdev->raid_disk >= 0)
  2331. rdev->new_raid_disk = rdev->raid_disk * 2;
  2332. conf->barrier = 1;
  2333. }
  2334. return conf;
  2335. }
  2336. static void *raid10_takeover(mddev_t *mddev)
  2337. {
  2338. struct raid0_private_data *raid0_priv;
/* raid10 can take over:
* raid0 - providing it has only one zone
*/
  2342. if (mddev->level == 0) {
  2343. /* for raid0 takeover only one zone is supported */
  2344. raid0_priv = mddev->private;
  2345. if (raid0_priv->nr_strip_zones > 1) {
  2346. printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
  2347. " with more than one zone.\n",
  2348. mdname(mddev));
  2349. return ERR_PTR(-EINVAL);
  2350. }
  2351. return raid10_takeover_raid0(mddev);
  2352. }
  2353. return ERR_PTR(-EINVAL);
  2354. }
  2355. static struct mdk_personality raid10_personality =
  2356. {
  2357. .name = "raid10",
  2358. .level = 10,
  2359. .owner = THIS_MODULE,
  2360. .make_request = make_request,
  2361. .run = run,
  2362. .stop = stop,
  2363. .status = status,
  2364. .error_handler = error,
  2365. .hot_add_disk = raid10_add_disk,
  2366. .hot_remove_disk= raid10_remove_disk,
  2367. .spare_active = raid10_spare_active,
  2368. .sync_request = sync_request,
  2369. .quiesce = raid10_quiesce,
  2370. .size = raid10_size,
  2371. .takeover = raid10_takeover,
  2372. };
  2373. static int __init raid_init(void)
  2374. {
  2375. return register_md_personality(&raid10_personality);
  2376. }
  2377. static void raid_exit(void)
  2378. {
  2379. unregister_md_personality(&raid10_personality);
  2380. }
  2381. module_init(raid_init);
  2382. module_exit(raid_exit);
  2383. MODULE_LICENSE("GPL");
  2384. MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
  2385. MODULE_ALIAS("md-personality-9"); /* RAID10 */
  2386. MODULE_ALIAS("md-raid10");
  2387. MODULE_ALIAS("md-level-10");