bitmap.c

  1. /*
  2. * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
  3. *
  4. * bitmap_create - sets up the bitmap structure
  5. * bitmap_destroy - destroys the bitmap structure
  6. *
  7. * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
  8. * - added disk storage for bitmap
  9. * - changes to allow various bitmap chunk sizes
  10. */
  11. /*
  12. * Still to do:
  13. *
  14. * flush after percent set rather than just time based. (maybe both).
  15. */
  16. #include <linux/blkdev.h>
  17. #include <linux/module.h>
  18. #include <linux/errno.h>
  19. #include <linux/slab.h>
  20. #include <linux/init.h>
  21. #include <linux/timer.h>
  22. #include <linux/sched.h>
  23. #include <linux/list.h>
  24. #include <linux/file.h>
  25. #include <linux/mount.h>
  26. #include <linux/buffer_head.h>
  27. #include "md.h"
  28. #include "bitmap.h"
  29. #include <linux/dm-dirty-log.h>
  30. /* debug macros */
  31. #define DEBUG 0
  32. #if DEBUG
  33. /* these are for debugging purposes only! */
  34. /* define one and only one of these */
  35. #define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */
  36. #define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/
  37. #define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */
  38. #define INJECT_FAULTS_4 0 /* undef */
  39. #define INJECT_FAULTS_5 0 /* undef */
  40. #define INJECT_FAULTS_6 0
  41. /* if these are defined, the driver will fail! debug only */
  42. #define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */
  43. #define INJECT_FATAL_FAULT_2 0 /* undef */
  44. #define INJECT_FATAL_FAULT_3 0 /* undef */
  45. #endif
  46. #ifndef PRINTK
  47. # if DEBUG > 0
  48. # define PRINTK(x...) printk(KERN_DEBUG x)
  49. # else
  50. # define PRINTK(x...)
  51. # endif
  52. #endif
  53. static inline char *bmname(struct bitmap *bitmap)
  54. {
  55. return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
  56. }
  57. /*
  58. * just a placeholder - calls kmalloc for bitmap pages
  59. */
  60. static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
  61. {
  62. unsigned char *page;
  63. #ifdef INJECT_FAULTS_1
  64. page = NULL;
  65. #else
  66. page = kzalloc(PAGE_SIZE, GFP_NOIO);
  67. #endif
  68. if (!page)
  69. printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
  70. else
  71. PRINTK("%s: bitmap_alloc_page: allocated page at %p\n",
  72. bmname(bitmap), page);
  73. return page;
  74. }
  75. /*
  76. * for now just a placeholder -- just calls kfree for bitmap pages
  77. */
  78. static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
  79. {
  80. PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
  81. kfree(page);
  82. }
  83. /*
  84. * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
  85. *
  86. * 1) check to see if this page is allocated, if it's not then try to alloc
  87. * 2) if the alloc fails, set the page's hijacked flag so we'll use the
  88. * page pointer directly as a counter
  89. *
  90. * if we find our page, we increment the page's refcount so that it stays
  91. * allocated while we're using it
  92. */
  93. static int bitmap_checkpage(struct bitmap *bitmap,
  94. unsigned long page, int create)
  95. __releases(bitmap->lock)
  96. __acquires(bitmap->lock)
  97. {
  98. unsigned char *mappage;
  99. if (page >= bitmap->pages) {
  100. /* This can happen if bitmap_start_sync goes beyond
  101. * End-of-device while looking for a whole page.
  102. * It is harmless.
  103. */
  104. return -EINVAL;
  105. }
  106. if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
  107. return 0;
  108. if (bitmap->bp[page].map) /* page is already allocated, just return */
  109. return 0;
  110. if (!create)
  111. return -ENOENT;
  112. /* this page has not been allocated yet */
  113. spin_unlock_irq(&bitmap->lock);
  114. mappage = bitmap_alloc_page(bitmap);
  115. spin_lock_irq(&bitmap->lock);
  116. if (mappage == NULL) {
  117. PRINTK("%s: bitmap map page allocation failed, hijacking\n",
  118. bmname(bitmap));
  119. /* failed - set the hijacked flag so that we can use the
  120. * pointer as a counter */
  121. if (!bitmap->bp[page].map)
  122. bitmap->bp[page].hijacked = 1;
  123. } else if (bitmap->bp[page].map ||
  124. bitmap->bp[page].hijacked) {
  125. /* somebody beat us to getting the page */
  126. bitmap_free_page(bitmap, mappage);
  127. return 0;
  128. } else {
  129. /* no page was in place and we have one, so install it */
  130. bitmap->bp[page].map = mappage;
  131. bitmap->missing_pages--;
  132. }
  133. return 0;
  134. }
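/*
 * Note on the hijack fallback above: each bitmap_page normally points at a
 * kzalloc'd PAGE_SIZE array of bitmap_counter_t counters.  If that
 * allocation fails, the pointer word itself is pressed into service as two
 * counters (see bitmap_get_counter), so each counter then covers half a
 * page's worth of chunks -- much coarser, but the bitmap keeps working
 * instead of failing the write path.
 */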
  135. /* if page is completely empty, put it back on the free list, or dealloc it */
  136. /* if page was hijacked, unmark the flag so it might get alloced next time */
  137. /* Note: lock should be held when calling this */
  138. static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
  139. {
  140. char *ptr;
  141. if (bitmap->bp[page].count) /* page is still busy */
  142. return;
  143. /* page is no longer in use, it can be released */
  144. if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
  145. bitmap->bp[page].hijacked = 0;
  146. bitmap->bp[page].map = NULL;
  147. } else {
  148. /* normal case, free the page */
  149. ptr = bitmap->bp[page].map;
  150. bitmap->bp[page].map = NULL;
  151. bitmap->missing_pages++;
  152. bitmap_free_page(bitmap, ptr);
  153. }
  154. }
  155. /*
  156. * bitmap file handling - read and write the bitmap file and its superblock
  157. */
  158. /*
  159. * basic page I/O operations
  160. */
  161. /* IO operations when bitmap is stored near all superblocks */
  162. static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
  163. struct page *page,
  164. unsigned long index, int size)
  165. {
  166. /* choose a good rdev and read the page from there */
  167. mdk_rdev_t *rdev;
  168. sector_t target;
  169. int did_alloc = 0;
  170. if (!page) {
  171. page = alloc_page(GFP_KERNEL);
  172. if (!page)
  173. return ERR_PTR(-ENOMEM);
  174. did_alloc = 1;
  175. }
  176. list_for_each_entry(rdev, &mddev->disks, same_set) {
  177. if (! test_bit(In_sync, &rdev->flags)
  178. || test_bit(Faulty, &rdev->flags))
  179. continue;
  180. target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
  181. if (sync_page_io(rdev, target,
  182. roundup(size, bdev_logical_block_size(rdev->bdev)),
  183. page, READ)) {
  184. page->index = index;
  185. attach_page_buffers(page, NULL); /* so that free_buffer will
  186. * quietly no-op */
  187. return page;
  188. }
  189. }
  190. if (did_alloc)
  191. put_page(page);
  192. return ERR_PTR(-EIO);
  193. }
  194. static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
  195. {
  196. /* Iterate the disks of an mddev, using rcu to protect access to the
  197. * linked list, and raising the refcount of devices we return to ensure
  198. * they don't disappear while in use.
  199. * As devices are only added or removed when raid_disk is < 0 and
  200. * nr_pending is 0 and In_sync is clear, the entries we return will
  201. * still be in the same position on the list when we re-enter
  202. * list_for_each_continue_rcu.
  203. */
  204. struct list_head *pos;
  205. rcu_read_lock();
  206. if (rdev == NULL)
  207. /* start at the beginning */
  208. pos = &mddev->disks;
  209. else {
  210. /* release the previous rdev and start from there. */
  211. rdev_dec_pending(rdev, mddev);
  212. pos = &rdev->same_set;
  213. }
  214. list_for_each_continue_rcu(pos, &mddev->disks) {
  215. rdev = list_entry(pos, mdk_rdev_t, same_set);
  216. if (rdev->raid_disk >= 0 &&
  217. !test_bit(Faulty, &rdev->flags)) {
  218. /* this is a usable device */
  219. atomic_inc(&rdev->nr_pending);
  220. rcu_read_unlock();
  221. return rdev;
  222. }
  223. }
  224. rcu_read_unlock();
  225. return NULL;
  226. }
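/*
 * Typical usage (see write_sb_page below): start with rdev == NULL and keep
 * calling until NULL comes back.  Each rdev returned carries a raised
 * nr_pending reference, which the next call drops via rdev_dec_pending
 * before advancing.
 */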
  227. static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
  228. {
  229. mdk_rdev_t *rdev = NULL;
  230. mddev_t *mddev = bitmap->mddev;
  231. while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
  232. int size = PAGE_SIZE;
  233. loff_t offset = mddev->bitmap_info.offset;
  234. if (page->index == bitmap->file_pages-1)
  235. size = roundup(bitmap->last_page_size,
  236. bdev_logical_block_size(rdev->bdev));
  237. /* Just make sure we aren't corrupting data or
  238. * metadata
  239. */
  240. if (mddev->external) {
  241. /* Bitmap could be anywhere. */
  242. if (rdev->sb_start + offset + (page->index
  243. * (PAGE_SIZE/512))
  244. > rdev->data_offset
  245. &&
  246. rdev->sb_start + offset
  247. < (rdev->data_offset + mddev->dev_sectors
  248. + (PAGE_SIZE/512)))
  249. goto bad_alignment;
  250. } else if (offset < 0) {
  251. /* DATA BITMAP METADATA */
  252. if (offset
  253. + (long)(page->index * (PAGE_SIZE/512))
  254. + size/512 > 0)
  255. /* bitmap runs in to metadata */
  256. goto bad_alignment;
  257. if (rdev->data_offset + mddev->dev_sectors
  258. > rdev->sb_start + offset)
  259. /* data runs in to bitmap */
  260. goto bad_alignment;
  261. } else if (rdev->sb_start < rdev->data_offset) {
  262. /* METADATA BITMAP DATA */
  263. if (rdev->sb_start
  264. + offset
  265. + page->index*(PAGE_SIZE/512) + size/512
  266. > rdev->data_offset)
  267. /* bitmap runs in to data */
  268. goto bad_alignment;
  269. } else {
  270. /* DATA METADATA BITMAP - no problems */
  271. }
  272. md_super_write(mddev, rdev,
  273. rdev->sb_start + offset
  274. + page->index * (PAGE_SIZE/512),
  275. size,
  276. page);
  277. }
  278. if (wait)
  279. md_super_wait(mddev);
  280. return 0;
  281. bad_alignment:
  282. return -EINVAL;
  283. }
  284. static void bitmap_file_kick(struct bitmap *bitmap);
  285. /*
  286. * write out a page to a file
  287. */
  288. static void write_page(struct bitmap *bitmap, struct page *page, int wait)
  289. {
  290. struct buffer_head *bh;
  291. if (bitmap->file == NULL) {
  292. switch (write_sb_page(bitmap, page, wait)) {
  293. case -EINVAL:
  294. bitmap->flags |= BITMAP_WRITE_ERROR;
  295. }
  296. } else {
  297. bh = page_buffers(page);
  298. while (bh && bh->b_blocknr) {
  299. atomic_inc(&bitmap->pending_writes);
  300. set_buffer_locked(bh);
  301. set_buffer_mapped(bh);
  302. submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh);
  303. bh = bh->b_this_page;
  304. }
  305. if (wait)
  306. wait_event(bitmap->write_wait,
  307. atomic_read(&bitmap->pending_writes)==0);
  308. }
  309. if (bitmap->flags & BITMAP_WRITE_ERROR)
  310. bitmap_file_kick(bitmap);
  311. }
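/*
 * For a file-backed bitmap the buffer_heads walked above were attached and
 * mapped to on-disk blocks by read_page() (via bmap), so this write goes
 * straight to the block device and never enters the filesystem's write path.
 */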
  312. static void end_bitmap_write(struct buffer_head *bh, int uptodate)
  313. {
  314. struct bitmap *bitmap = bh->b_private;
  315. unsigned long flags;
  316. if (!uptodate) {
  317. spin_lock_irqsave(&bitmap->lock, flags);
  318. bitmap->flags |= BITMAP_WRITE_ERROR;
  319. spin_unlock_irqrestore(&bitmap->lock, flags);
  320. }
  321. if (atomic_dec_and_test(&bitmap->pending_writes))
  322. wake_up(&bitmap->write_wait);
  323. }
  324. /* copied from buffer.c */
  325. static void
  326. __clear_page_buffers(struct page *page)
  327. {
  328. ClearPagePrivate(page);
  329. set_page_private(page, 0);
  330. page_cache_release(page);
  331. }
  332. static void free_buffers(struct page *page)
  333. {
  334. struct buffer_head *bh = page_buffers(page);
  335. while (bh) {
  336. struct buffer_head *next = bh->b_this_page;
  337. free_buffer_head(bh);
  338. bh = next;
  339. }
  340. __clear_page_buffers(page);
  341. put_page(page);
  342. }
  343. /* read a page from a file.
  344. * We both read the page, and attach buffers to the page to record the
  345. * address of each block (using bmap). These addresses will be used
  346. * to write the block later, completely bypassing the filesystem.
  347. * This usage is similar to how swap files are handled, and allows us
  348. * to write to a file with no concerns of memory allocation failing.
  349. */
  350. static struct page *read_page(struct file *file, unsigned long index,
  351. struct bitmap *bitmap,
  352. unsigned long count)
  353. {
  354. struct page *page = NULL;
  355. struct inode *inode = file->f_path.dentry->d_inode;
  356. struct buffer_head *bh;
  357. sector_t block;
  358. PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
  359. (unsigned long long)index << PAGE_SHIFT);
  360. page = alloc_page(GFP_KERNEL);
  361. if (!page)
  362. page = ERR_PTR(-ENOMEM);
  363. if (IS_ERR(page))
  364. goto out;
  365. bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
  366. if (!bh) {
  367. put_page(page);
  368. page = ERR_PTR(-ENOMEM);
  369. goto out;
  370. }
  371. attach_page_buffers(page, bh);
  372. block = index << (PAGE_SHIFT - inode->i_blkbits);
  373. while (bh) {
  374. if (count == 0)
  375. bh->b_blocknr = 0;
  376. else {
  377. bh->b_blocknr = bmap(inode, block);
  378. if (bh->b_blocknr == 0) {
  379. /* Cannot use this file! */
  380. free_buffers(page);
  381. page = ERR_PTR(-EINVAL);
  382. goto out;
  383. }
  384. bh->b_bdev = inode->i_sb->s_bdev;
  385. if (count < (1<<inode->i_blkbits))
  386. count = 0;
  387. else
  388. count -= (1<<inode->i_blkbits);
  389. bh->b_end_io = end_bitmap_write;
  390. bh->b_private = bitmap;
  391. atomic_inc(&bitmap->pending_writes);
  392. set_buffer_locked(bh);
  393. set_buffer_mapped(bh);
  394. submit_bh(READ, bh);
  395. }
  396. block++;
  397. bh = bh->b_this_page;
  398. }
  399. page->index = index;
  400. wait_event(bitmap->write_wait,
  401. atomic_read(&bitmap->pending_writes)==0);
  402. if (bitmap->flags & BITMAP_WRITE_ERROR) {
  403. free_buffers(page);
  404. page = ERR_PTR(-EIO);
  405. }
  406. out:
  407. if (IS_ERR(page))
  408. printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
  409. (int)PAGE_SIZE,
  410. (unsigned long long)index << PAGE_SHIFT,
  411. PTR_ERR(page));
  412. return page;
  413. }
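/*
 * Buffer_heads beyond 'count' are left with b_blocknr == 0; write_page()
 * stops submitting I/O at the first such buffer, which handles a short
 * final page cleanly.
 */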
  414. /*
  415. * bitmap file superblock operations
  416. */
  417. /* update the event counter and sync the superblock to disk */
  418. void bitmap_update_sb(struct bitmap *bitmap)
  419. {
  420. bitmap_super_t *sb;
  421. unsigned long flags;
  422. if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
  423. return;
  424. if (bitmap->mddev->bitmap_info.external)
  425. return;
  426. spin_lock_irqsave(&bitmap->lock, flags);
  427. if (!bitmap->sb_page) { /* no superblock */
  428. spin_unlock_irqrestore(&bitmap->lock, flags);
  429. return;
  430. }
  431. spin_unlock_irqrestore(&bitmap->lock, flags);
  432. sb = kmap_atomic(bitmap->sb_page, KM_USER0);
  433. sb->events = cpu_to_le64(bitmap->mddev->events);
  434. if (bitmap->mddev->events < bitmap->events_cleared) {
  435. /* rocking back to read-only */
  436. bitmap->events_cleared = bitmap->mddev->events;
  437. sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
  438. }
  439. /* Just in case these have been changed via sysfs: */
  440. sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
  441. sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
  442. kunmap_atomic(sb, KM_USER0);
  443. write_page(bitmap, bitmap->sb_page, 1);
  444. }
  445. /* print out the bitmap file superblock */
  446. void bitmap_print_sb(struct bitmap *bitmap)
  447. {
  448. bitmap_super_t *sb;
  449. if (!bitmap || !bitmap->sb_page)
  450. return;
  451. sb = kmap_atomic(bitmap->sb_page, KM_USER0);
  452. printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
  453. printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
  454. printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
  455. printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n",
  456. *(__u32 *)(sb->uuid+0),
  457. *(__u32 *)(sb->uuid+4),
  458. *(__u32 *)(sb->uuid+8),
  459. *(__u32 *)(sb->uuid+12));
  460. printk(KERN_DEBUG " events: %llu\n",
  461. (unsigned long long) le64_to_cpu(sb->events));
  462. printk(KERN_DEBUG "events cleared: %llu\n",
  463. (unsigned long long) le64_to_cpu(sb->events_cleared));
  464. printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state));
  465. printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize));
  466. printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
  467. printk(KERN_DEBUG " sync size: %llu KB\n",
  468. (unsigned long long)le64_to_cpu(sb->sync_size)/2);
  469. printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
  470. kunmap_atomic(sb, KM_USER0);
  471. }
  472. /* read the superblock from the bitmap file and initialize some bitmap fields */
  473. static int bitmap_read_sb(struct bitmap *bitmap)
  474. {
  475. char *reason = NULL;
  476. bitmap_super_t *sb;
  477. unsigned long chunksize, daemon_sleep, write_behind;
  478. unsigned long long events;
  479. int err = -EINVAL;
  480. /* page 0 is the superblock, read it... */
  481. if (bitmap->file) {
  482. loff_t isize = i_size_read(bitmap->file->f_mapping->host);
  483. int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
  484. bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
  485. } else {
  486. bitmap->sb_page = read_sb_page(bitmap->mddev,
  487. bitmap->mddev->bitmap_info.offset,
  488. NULL,
  489. 0, sizeof(bitmap_super_t));
  490. }
  491. if (IS_ERR(bitmap->sb_page)) {
  492. err = PTR_ERR(bitmap->sb_page);
  493. bitmap->sb_page = NULL;
  494. return err;
  495. }
  496. sb = kmap_atomic(bitmap->sb_page, KM_USER0);
  497. chunksize = le32_to_cpu(sb->chunksize);
  498. daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
  499. write_behind = le32_to_cpu(sb->write_behind);
  500. /* verify that the bitmap-specific fields are valid */
  501. if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
  502. reason = "bad magic";
  503. else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
  504. le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
  505. reason = "unrecognized superblock version";
  506. else if (chunksize < 512)
  507. reason = "bitmap chunksize too small";
  508. else if ((1 << ffz(~chunksize)) != chunksize)
  509. reason = "bitmap chunksize not a power of 2";
  510. else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
  511. reason = "daemon sleep period out of range";
  512. else if (write_behind > COUNTER_MAX)
  513. reason = "write-behind limit out of range (0 - 16383)";
  514. if (reason) {
  515. printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
  516. bmname(bitmap), reason);
  517. goto out;
  518. }
  519. /* keep the array size field of the bitmap superblock up to date */
  520. sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
  521. if (!bitmap->mddev->persistent)
  522. goto success;
  523. /*
  524. * if we have a persistent array superblock, compare the
  525. * bitmap's UUID and event counter to the mddev's
  526. */
  527. if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
  528. printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n",
  529. bmname(bitmap));
  530. goto out;
  531. }
  532. events = le64_to_cpu(sb->events);
  533. if (events < bitmap->mddev->events) {
  534. printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) "
  535. "-- forcing full recovery\n", bmname(bitmap), events,
  536. (unsigned long long) bitmap->mddev->events);
  537. sb->state |= cpu_to_le32(BITMAP_STALE);
  538. }
  539. success:
  540. /* assign fields using values from superblock */
  541. bitmap->mddev->bitmap_info.chunksize = chunksize;
  542. bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
  543. bitmap->mddev->bitmap_info.max_write_behind = write_behind;
  544. bitmap->flags |= le32_to_cpu(sb->state);
  545. if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
  546. bitmap->flags |= BITMAP_HOSTENDIAN;
  547. bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
  548. if (sb->state & cpu_to_le32(BITMAP_STALE))
  549. bitmap->events_cleared = bitmap->mddev->events;
  550. err = 0;
  551. out:
  552. kunmap_atomic(sb, KM_USER0);
  553. if (err)
  554. bitmap_print_sb(bitmap);
  555. return err;
  556. }
  557. enum bitmap_mask_op {
  558. MASK_SET,
  559. MASK_UNSET
  560. };
  561. /* record the state of the bitmap in the superblock. Return the old value */
  562. static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
  563. enum bitmap_mask_op op)
  564. {
  565. bitmap_super_t *sb;
  566. unsigned long flags;
  567. int old;
  568. spin_lock_irqsave(&bitmap->lock, flags);
  569. if (!bitmap->sb_page) { /* can't set the state */
  570. spin_unlock_irqrestore(&bitmap->lock, flags);
  571. return 0;
  572. }
  573. spin_unlock_irqrestore(&bitmap->lock, flags);
  574. sb = kmap_atomic(bitmap->sb_page, KM_USER0);
  575. old = le32_to_cpu(sb->state) & bits;
  576. switch (op) {
  577. case MASK_SET:
  578. sb->state |= cpu_to_le32(bits);
  579. break;
  580. case MASK_UNSET:
  581. sb->state &= cpu_to_le32(~bits);
  582. break;
  583. default:
  584. BUG();
  585. }
  586. kunmap_atomic(sb, KM_USER0);
  587. return old;
  588. }
  589. /*
  590. * general bitmap file operations
  591. */
  592. /*
  593. * on-disk bitmap:
  594. *
  595. * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
  596. * file a page at a time. There's a superblock at the start of the file.
  597. */
  598. /* calculate the index of the page that contains this bit */
  599. static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
  600. {
  601. if (!bitmap->mddev->bitmap_info.external)
  602. chunk += sizeof(bitmap_super_t) << 3;
  603. return chunk >> PAGE_BIT_SHIFT;
  604. }
  605. /* calculate the (bit) offset of this bit within a page */
  606. static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
  607. {
  608. if (!bitmap->mddev->bitmap_info.external)
  609. chunk += sizeof(bitmap_super_t) << 3;
  610. return chunk & (PAGE_BITS - 1);
  611. }
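/*
 * Worked example, assuming 4 KiB pages (PAGE_BITS == 32768) and a 256-byte
 * bitmap superblock (2048 bits): for an internal bitmap, chunk 40000 maps
 * to overall bit 40000 + 2048 = 42048, i.e. file page 42048 / 32768 = 1,
 * bit offset 42048 % 32768 = 9280 within that page.
 */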
  612. /*
  613. * return a pointer to the page in the filemap that contains the given bit
  614. *
  615. * this lookup is complicated by the fact that the bitmap sb might be exactly
  616. * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
  617. * 0 or page 1
  618. */
  619. static inline struct page *filemap_get_page(struct bitmap *bitmap,
  620. unsigned long chunk)
  621. {
  622. if (bitmap->filemap == NULL)
  623. return NULL;
  624. if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
  625. return NULL;
  626. return bitmap->filemap[file_page_index(bitmap, chunk)
  627. - file_page_index(bitmap, 0)];
  628. }
  629. static void bitmap_file_unmap(struct bitmap *bitmap)
  630. {
  631. struct page **map, *sb_page;
  632. unsigned long *attr;
  633. int pages;
  634. unsigned long flags;
  635. spin_lock_irqsave(&bitmap->lock, flags);
  636. map = bitmap->filemap;
  637. bitmap->filemap = NULL;
  638. attr = bitmap->filemap_attr;
  639. bitmap->filemap_attr = NULL;
  640. pages = bitmap->file_pages;
  641. bitmap->file_pages = 0;
  642. sb_page = bitmap->sb_page;
  643. bitmap->sb_page = NULL;
  644. spin_unlock_irqrestore(&bitmap->lock, flags);
  645. while (pages--)
  646. if (map[pages] != sb_page) /* 0 is sb_page, release it below */
  647. free_buffers(map[pages]);
  648. kfree(map);
  649. kfree(attr);
  650. if (sb_page)
  651. free_buffers(sb_page);
  652. }
  653. static void bitmap_file_put(struct bitmap *bitmap)
  654. {
  655. struct file *file;
  656. unsigned long flags;
  657. spin_lock_irqsave(&bitmap->lock, flags);
  658. file = bitmap->file;
  659. bitmap->file = NULL;
  660. spin_unlock_irqrestore(&bitmap->lock, flags);
  661. if (file)
  662. wait_event(bitmap->write_wait,
  663. atomic_read(&bitmap->pending_writes)==0);
  664. bitmap_file_unmap(bitmap);
  665. if (file) {
  666. struct inode *inode = file->f_path.dentry->d_inode;
  667. invalidate_mapping_pages(inode->i_mapping, 0, -1);
  668. fput(file);
  669. }
  670. }
  671. /*
  672. * bitmap_file_kick - if an error occurs while manipulating the bitmap file
  673. * then it is no longer reliable, so we stop using it and we mark the file
  674. * as failed in the superblock
  675. */
  676. static void bitmap_file_kick(struct bitmap *bitmap)
  677. {
  678. char *path, *ptr = NULL;
  679. if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
  680. bitmap_update_sb(bitmap);
  681. if (bitmap->file) {
  682. path = kmalloc(PAGE_SIZE, GFP_KERNEL);
  683. if (path)
  684. ptr = d_path(&bitmap->file->f_path, path,
  685. PAGE_SIZE);
  686. printk(KERN_ALERT
  687. "%s: kicking failed bitmap file %s from array!\n",
  688. bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
  689. kfree(path);
  690. } else
  691. printk(KERN_ALERT
  692. "%s: disabling internal bitmap due to errors\n",
  693. bmname(bitmap));
  694. }
  695. bitmap_file_put(bitmap);
  696. return;
  697. }
  698. enum bitmap_page_attr {
  699. BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */
  700. BITMAP_PAGE_CLEAN = 1, /* there are bits that might need to be cleared */
  701. BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
  702. };
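/*
 * These attributes are packed four per filemap page into the
 * bitmap->filemap_attr bit array, indexed as (page->index << 2) + attr;
 * when there is no filemap page (the dm-dirty-log case) the same bits live
 * in bitmap->logattrs instead, as the helpers below show.
 */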
  703. static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
  704. enum bitmap_page_attr attr)
  705. {
  706. if (page)
  707. __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
  708. else
  709. __set_bit(attr, &bitmap->logattrs);
  710. }
  711. static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
  712. enum bitmap_page_attr attr)
  713. {
  714. if (page)
  715. __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
  716. else
  717. __clear_bit(attr, &bitmap->logattrs);
  718. }
  719. static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
  720. enum bitmap_page_attr attr)
  721. {
  722. if (page)
  723. return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
  724. else
  725. return test_bit(attr, &bitmap->logattrs);
  726. }
  727. /*
  728. * bitmap_file_set_bit -- called before performing a write to the md device
  729. * to set (and eventually sync) a particular bit in the bitmap file
  730. *
  731. * we set the bit immediately, then we record the page number so that
  732. * when an unplug occurs, we can flush the dirty pages out to disk
  733. */
  734. static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
  735. {
  736. unsigned long bit;
  737. struct page *page = NULL;
  738. void *kaddr;
  739. unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
  740. if (!bitmap->filemap) {
  741. struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
  742. if (log)
  743. log->type->mark_region(log, chunk);
  744. } else {
  745. page = filemap_get_page(bitmap, chunk);
  746. if (!page)
  747. return;
  748. bit = file_page_offset(bitmap, chunk);
  749. /* set the bit */
  750. kaddr = kmap_atomic(page, KM_USER0);
  751. if (bitmap->flags & BITMAP_HOSTENDIAN)
  752. set_bit(bit, kaddr);
  753. else
  754. ext2_set_bit(bit, kaddr);
  755. kunmap_atomic(kaddr, KM_USER0);
  756. PRINTK("set file bit %lu page %lu\n", bit, page->index);
  757. }
  758. /* record page number so it gets flushed to disk when unplug occurs */
  759. set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
  760. }
  761. /* this gets called when the md device is ready to unplug its underlying
  762. * (slave) device queues -- before we let any writes go down, we need to
  763. * sync the dirty pages of the bitmap file to disk */
  764. void bitmap_unplug(struct bitmap *bitmap)
  765. {
  766. unsigned long i, flags;
  767. int dirty, need_write;
  768. struct page *page;
  769. int wait = 0;
  770. if (!bitmap)
  771. return;
  772. if (!bitmap->filemap) {
  773. /* Must be using a dirty_log */
  774. struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
  775. dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs);
  776. need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs);
  777. if (dirty || need_write)
  778. if (log->type->flush(log))
  779. bitmap->flags |= BITMAP_WRITE_ERROR;
  780. goto out;
  781. }
  782. /* look at each page to see if there are any set bits that need to be
  783. * flushed out to disk */
  784. for (i = 0; i < bitmap->file_pages; i++) {
  785. spin_lock_irqsave(&bitmap->lock, flags);
  786. if (!bitmap->filemap) {
  787. spin_unlock_irqrestore(&bitmap->lock, flags);
  788. return;
  789. }
  790. page = bitmap->filemap[i];
  791. dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
  792. need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
  793. clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
  794. clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
  795. if (dirty)
  796. wait = 1;
  797. spin_unlock_irqrestore(&bitmap->lock, flags);
  798. if (dirty || need_write)
  799. write_page(bitmap, page, 0);
  800. }
  801. if (wait) { /* if any writes were performed, we need to wait on them */
  802. if (bitmap->file)
  803. wait_event(bitmap->write_wait,
  804. atomic_read(&bitmap->pending_writes)==0);
  805. else
  806. md_super_wait(bitmap->mddev);
  807. }
  808. out:
  809. if (bitmap->flags & BITMAP_WRITE_ERROR)
  810. bitmap_file_kick(bitmap);
  811. }
  812. EXPORT_SYMBOL(bitmap_unplug);
  813. static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
  814. /* bitmap_init_from_disk -- called at bitmap_create time to initialize
  815. * the in-memory bitmap from the on-disk bitmap -- also, sets up the
  816. * memory mapping of the bitmap file
  817. * Special cases:
  818. * if there's no bitmap file, or if the bitmap file had been
  819. * previously kicked from the array, we mark all the bits as
  820. * 1's in order to cause a full resync.
  821. *
  822. * We ignore all bits for sectors that end earlier than 'start'.
  823. * This is used when reading an out-of-date bitmap...
  824. */
  825. static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
  826. {
  827. unsigned long i, chunks, index, oldindex, bit;
  828. struct page *page = NULL, *oldpage = NULL;
  829. unsigned long num_pages, bit_cnt = 0;
  830. struct file *file;
  831. unsigned long bytes, offset;
  832. int outofdate;
  833. int ret = -ENOSPC;
  834. void *paddr;
  835. chunks = bitmap->chunks;
  836. file = bitmap->file;
  837. BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);
  838. #ifdef INJECT_FAULTS_3
  839. outofdate = 1;
  840. #else
  841. outofdate = bitmap->flags & BITMAP_STALE;
  842. #endif
  843. if (outofdate)
  844. printk(KERN_INFO "%s: bitmap file is out of date, doing full "
  845. "recovery\n", bmname(bitmap));
  846. bytes = DIV_ROUND_UP(bitmap->chunks, 8);
  847. if (!bitmap->mddev->bitmap_info.external)
  848. bytes += sizeof(bitmap_super_t);
  849. num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
  850. if (file && i_size_read(file->f_mapping->host) < bytes) {
  851. printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
  852. bmname(bitmap),
  853. (unsigned long) i_size_read(file->f_mapping->host),
  854. bytes);
  855. goto err;
  856. }
  857. ret = -ENOMEM;
  858. bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
  859. if (!bitmap->filemap)
  860. goto err;
  861. /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
  862. bitmap->filemap_attr = kzalloc(
  863. roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
  864. GFP_KERNEL);
  865. if (!bitmap->filemap_attr)
  866. goto err;
  867. oldindex = ~0L;
  868. for (i = 0; i < chunks; i++) {
  869. int b;
  870. index = file_page_index(bitmap, i);
  871. bit = file_page_offset(bitmap, i);
  872. if (index != oldindex) { /* this is a new page, read it in */
  873. int count;
  874. /* unmap the old page, we're done with it */
  875. if (index == num_pages-1)
  876. count = bytes - index * PAGE_SIZE;
  877. else
  878. count = PAGE_SIZE;
  879. if (index == 0 && bitmap->sb_page) {
  880. /*
  881. * if we're here then the superblock page
  882. * contains some bits (PAGE_SIZE != sizeof sb)
  883. * we've already read it in, so just use it
  884. */
  885. page = bitmap->sb_page;
  886. offset = sizeof(bitmap_super_t);
  887. if (!file)
  888. page = read_sb_page(
  889. bitmap->mddev,
  890. bitmap->mddev->bitmap_info.offset,
  891. page,
  892. index, count);
  893. } else if (file) {
  894. page = read_page(file, index, bitmap, count);
  895. offset = 0;
  896. } else {
  897. page = read_sb_page(bitmap->mddev,
  898. bitmap->mddev->bitmap_info.offset,
  899. NULL,
  900. index, count);
  901. offset = 0;
  902. }
  903. if (IS_ERR(page)) { /* read error */
  904. ret = PTR_ERR(page);
  905. goto err;
  906. }
  907. oldindex = index;
  908. oldpage = page;
  909. bitmap->filemap[bitmap->file_pages++] = page;
  910. bitmap->last_page_size = count;
  911. if (outofdate) {
  912. /*
  913. * if bitmap is out of date, dirty the
  914. * whole page and write it out
  915. */
  916. paddr = kmap_atomic(page, KM_USER0);
  917. memset(paddr + offset, 0xff,
  918. PAGE_SIZE - offset);
  919. kunmap_atomic(paddr, KM_USER0);
  920. write_page(bitmap, page, 1);
  921. ret = -EIO;
  922. if (bitmap->flags & BITMAP_WRITE_ERROR)
  923. goto err;
  924. }
  925. }
  926. paddr = kmap_atomic(page, KM_USER0);
  927. if (bitmap->flags & BITMAP_HOSTENDIAN)
  928. b = test_bit(bit, paddr);
  929. else
  930. b = ext2_test_bit(bit, paddr);
  931. kunmap_atomic(paddr, KM_USER0);
  932. if (b) {
  933. /* if the disk bit is set, set the memory bit */
  934. int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
  935. >= start);
  936. bitmap_set_memory_bits(bitmap,
  937. (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
  938. needed);
  939. bit_cnt++;
  940. set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
  941. }
  942. }
  943. /* everything went OK */
  944. ret = 0;
  945. bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
  946. if (bit_cnt) { /* Kick recovery if any bits were set */
  947. set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
  948. md_wakeup_thread(bitmap->mddev->thread);
  949. }
  950. printk(KERN_INFO "%s: bitmap initialized from disk: "
  951. "read %lu/%lu pages, set %lu bits\n",
  952. bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
  953. return 0;
  954. err:
  955. printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
  956. bmname(bitmap), ret);
  957. return ret;
  958. }
  959. void bitmap_write_all(struct bitmap *bitmap)
  960. {
  961. /* We don't actually write all bitmap blocks here,
  962. * just flag them as needing to be written
  963. */
  964. int i;
  965. for (i = 0; i < bitmap->file_pages; i++)
  966. set_page_attr(bitmap, bitmap->filemap[i],
  967. BITMAP_PAGE_NEEDWRITE);
  968. }
  969. static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
  970. {
  971. sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
  972. unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
  973. bitmap->bp[page].count += inc;
  974. bitmap_checkfree(bitmap, page);
  975. }
  976. static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
  977. sector_t offset, sector_t *blocks,
  978. int create);
  979. /*
  980. * bitmap daemon -- periodically wakes up to clean bits and flush pages
  981. * out to disk
  982. */
  983. void bitmap_daemon_work(mddev_t *mddev)
  984. {
  985. struct bitmap *bitmap;
  986. unsigned long j;
  987. unsigned long flags;
  988. struct page *page = NULL, *lastpage = NULL;
  989. sector_t blocks;
  990. void *paddr;
  991. struct dm_dirty_log *log = mddev->bitmap_info.log;
  992. /* Use a mutex to guard daemon_work against
  993. * bitmap_destroy.
  994. */
  995. mutex_lock(&mddev->bitmap_info.mutex);
  996. bitmap = mddev->bitmap;
  997. if (bitmap == NULL) {
  998. mutex_unlock(&mddev->bitmap_info.mutex);
  999. return;
  1000. }
  1001. if (time_before(jiffies, bitmap->daemon_lastrun
  1002. + bitmap->mddev->bitmap_info.daemon_sleep))
  1003. goto done;
  1004. bitmap->daemon_lastrun = jiffies;
  1005. if (bitmap->allclean) {
  1006. bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
  1007. goto done;
  1008. }
  1009. bitmap->allclean = 1;
  1010. spin_lock_irqsave(&bitmap->lock, flags);
  1011. for (j = 0; j < bitmap->chunks; j++) {
  1012. bitmap_counter_t *bmc;
  1013. if (!bitmap->filemap) {
  1014. if (!log)
  1015. /* error or shutdown */
  1016. break;
  1017. } else
  1018. page = filemap_get_page(bitmap, j);
  1019. if (page != lastpage) {
  1020. /* skip this page unless it's marked as needing cleaning */
  1021. if (!test_page_attr(bitmap, page, BITMAP_PAGE_CLEAN)) {
  1022. int need_write = test_page_attr(bitmap, page,
  1023. BITMAP_PAGE_NEEDWRITE);
  1024. if (need_write)
  1025. clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
  1026. spin_unlock_irqrestore(&bitmap->lock, flags);
  1027. if (need_write) {
  1028. write_page(bitmap, page, 0);
  1029. bitmap->allclean = 0;
  1030. }
  1031. spin_lock_irqsave(&bitmap->lock, flags);
  1032. j |= (PAGE_BITS - 1);
  1033. continue;
  1034. }
  1035. /* grab the new page, sync and release the old */
  1036. if (lastpage != NULL) {
  1037. if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
  1038. clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
  1039. spin_unlock_irqrestore(&bitmap->lock, flags);
  1040. write_page(bitmap, lastpage, 0);
  1041. } else {
  1042. set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
  1043. spin_unlock_irqrestore(&bitmap->lock, flags);
  1044. }
  1045. } else
  1046. spin_unlock_irqrestore(&bitmap->lock, flags);
  1047. lastpage = page;
  1048. /* We are possibly going to clear some bits, so make
  1049. * sure that events_cleared is up-to-date.
  1050. */
  1051. if (bitmap->need_sync &&
  1052. bitmap->mddev->bitmap_info.external == 0) {
  1053. bitmap_super_t *sb;
  1054. bitmap->need_sync = 0;
  1055. sb = kmap_atomic(bitmap->sb_page, KM_USER0);
  1056. sb->events_cleared =
  1057. cpu_to_le64(bitmap->events_cleared);
  1058. kunmap_atomic(sb, KM_USER0);
  1059. write_page(bitmap, bitmap->sb_page, 1);
  1060. }
  1061. spin_lock_irqsave(&bitmap->lock, flags);
  1062. if (!bitmap->need_sync)
  1063. clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
  1064. }
  1065. bmc = bitmap_get_counter(bitmap,
  1066. (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
  1067. &blocks, 0);
  1068. if (bmc) {
  1069. if (*bmc)
  1070. bitmap->allclean = 0;
  1071. if (*bmc == 2) {
  1072. *bmc = 1; /* maybe clear the bit next time */
  1073. set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
  1074. } else if (*bmc == 1 && !bitmap->need_sync) {
  1075. /* we can clear the bit */
  1076. *bmc = 0;
  1077. bitmap_count_page(bitmap,
  1078. (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
  1079. -1);
  1080. /* clear the bit */
  1081. if (page) {
  1082. paddr = kmap_atomic(page, KM_USER0);
  1083. if (bitmap->flags & BITMAP_HOSTENDIAN)
  1084. clear_bit(file_page_offset(bitmap, j),
  1085. paddr);
  1086. else
  1087. ext2_clear_bit(file_page_offset(bitmap, j),
  1088. paddr);
  1089. kunmap_atomic(paddr, KM_USER0);
  1090. } else
  1091. log->type->clear_region(log, j);
  1092. }
  1093. } else
  1094. j |= PAGE_COUNTER_MASK;
  1095. }
  1096. spin_unlock_irqrestore(&bitmap->lock, flags);
  1097. /* now sync the final page */
  1098. if (lastpage != NULL || log != NULL) {
  1099. spin_lock_irqsave(&bitmap->lock, flags);
  1100. if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
  1101. clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
  1102. spin_unlock_irqrestore(&bitmap->lock, flags);
  1103. if (lastpage)
  1104. write_page(bitmap, lastpage, 0);
  1105. else
  1106. if (log->type->flush(log))
  1107. bitmap->flags |= BITMAP_WRITE_ERROR;
  1108. } else {
  1109. set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
  1110. spin_unlock_irqrestore(&bitmap->lock, flags);
  1111. }
  1112. }
  1113. done:
  1114. if (bitmap->allclean == 0)
  1115. bitmap->mddev->thread->timeout =
  1116. bitmap->mddev->bitmap_info.daemon_sleep;
  1117. mutex_unlock(&mddev->bitmap_info.mutex);
  1118. }
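/*
 * The loop above ages each chunk counter in two stages: a value of 2 is
 * dropped to 1 on one daemon pass, and only a later pass takes it from 1
 * to 0 and clears the on-disk bit, so a chunk must stay idle for roughly
 * two daemon_sleep periods before its dirty bit leaves the file.
 */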
  1119. static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
  1120. sector_t offset, sector_t *blocks,
  1121. int create)
  1122. __releases(bitmap->lock)
  1123. __acquires(bitmap->lock)
  1124. {
  1125. /* If 'create', we might release the lock and reclaim it.
  1126. * The lock must have been taken with interrupts enabled.
  1127. * If !create, we don't release the lock.
  1128. */
  1129. sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
  1130. unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
  1131. unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
  1132. sector_t csize;
  1133. int err;
  1134. err = bitmap_checkpage(bitmap, page, create);
  1135. if (bitmap->bp[page].hijacked ||
  1136. bitmap->bp[page].map == NULL)
  1137. csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
  1138. PAGE_COUNTER_SHIFT - 1);
  1139. else
  1140. csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
  1141. *blocks = csize - (offset & (csize - 1));
  1142. if (err < 0)
  1143. return NULL;
  1144. /* now locked ... */
  1145. if (bitmap->bp[page].hijacked) { /* hijacked pointer */
  1146. /* should we use the first or second counter field
  1147. * of the hijacked pointer? */
  1148. int hi = (pageoff > PAGE_COUNTER_MASK);
  1149. return &((bitmap_counter_t *)
  1150. &bitmap->bp[page].map)[hi];
  1151. } else /* page is allocated */
  1152. return (bitmap_counter_t *)
  1153. &(bitmap->bp[page].map[pageoff]);
  1154. }
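/*
 * Each bitmap_counter_t packs the per-chunk state used by the writers and
 * resync code below: a NEEDED flag (chunk still needs resync), a RESYNC
 * flag (resync in progress) and a use count that bitmap_startwrite raises,
 * bitmap_endwrite drops and bitmap_daemon_work ages back to zero, capped
 * at COUNTER_MAX (16383, matching the write-behind range check in
 * bitmap_read_sb).
 */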
  1155. int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
  1156. {
  1157. if (!bitmap)
  1158. return 0;
  1159. if (behind) {
  1160. int bw;
  1161. atomic_inc(&bitmap->behind_writes);
  1162. bw = atomic_read(&bitmap->behind_writes);
  1163. if (bw > bitmap->behind_writes_used)
  1164. bitmap->behind_writes_used = bw;
  1165. PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
  1166. bw, bitmap->max_write_behind);
  1167. }
  1168. while (sectors) {
  1169. sector_t blocks;
  1170. bitmap_counter_t *bmc;
  1171. spin_lock_irq(&bitmap->lock);
  1172. bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
  1173. if (!bmc) {
  1174. spin_unlock_irq(&bitmap->lock);
  1175. return 0;
  1176. }
  1177. if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
  1178. DEFINE_WAIT(__wait);
  1179. /* note that it is safe to do the prepare_to_wait
  1180. * after the test as long as we do it before dropping
  1181. * the spinlock.
  1182. */
  1183. prepare_to_wait(&bitmap->overflow_wait, &__wait,
  1184. TASK_UNINTERRUPTIBLE);
  1185. spin_unlock_irq(&bitmap->lock);
  1186. md_unplug(bitmap->mddev);
  1187. schedule();
  1188. finish_wait(&bitmap->overflow_wait, &__wait);
  1189. continue;
  1190. }
  1191. switch (*bmc) {
  1192. case 0:
  1193. bitmap_file_set_bit(bitmap, offset);
  1194. bitmap_count_page(bitmap, offset, 1);
  1195. /* fall through */
  1196. case 1:
  1197. *bmc = 2;
  1198. }
  1199. (*bmc)++;
  1200. spin_unlock_irq(&bitmap->lock);
  1201. offset += blocks;
  1202. if (sectors > blocks)
  1203. sectors -= blocks;
  1204. else
  1205. sectors = 0;
  1206. }
  1207. bitmap->allclean = 0;
  1208. return 0;
  1209. }
  1210. EXPORT_SYMBOL(bitmap_startwrite);
  1211. void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
  1212. int success, int behind)
  1213. {
  1214. if (!bitmap)
  1215. return;
  1216. if (behind) {
  1217. if (atomic_dec_and_test(&bitmap->behind_writes))
  1218. wake_up(&bitmap->behind_wait);
  1219. PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
  1220. atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
  1221. }
  1222. if (bitmap->mddev->degraded)
  1223. /* Never clear bits or update events_cleared when degraded */
  1224. success = 0;
  1225. while (sectors) {
  1226. sector_t blocks;
  1227. unsigned long flags;
  1228. bitmap_counter_t *bmc;
  1229. spin_lock_irqsave(&bitmap->lock, flags);
  1230. bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
  1231. if (!bmc) {
  1232. spin_unlock_irqrestore(&bitmap->lock, flags);
  1233. return;
  1234. }
  1235. if (success &&
  1236. bitmap->events_cleared < bitmap->mddev->events) {
  1237. bitmap->events_cleared = bitmap->mddev->events;
  1238. bitmap->need_sync = 1;
  1239. sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
  1240. }
  1241. if (!success && ! (*bmc & NEEDED_MASK))
  1242. *bmc |= NEEDED_MASK;
  1243. if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
  1244. wake_up(&bitmap->overflow_wait);
  1245. (*bmc)--;
  1246. if (*bmc <= 2)
  1247. set_page_attr(bitmap,
  1248. filemap_get_page(
  1249. bitmap,
  1250. offset >> CHUNK_BLOCK_SHIFT(bitmap)),
  1251. BITMAP_PAGE_CLEAN);
  1252. spin_unlock_irqrestore(&bitmap->lock, flags);
  1253. offset += blocks;
  1254. if (sectors > blocks)
  1255. sectors -= blocks;
  1256. else
  1257. sectors = 0;
  1258. }
  1259. }
  1260. EXPORT_SYMBOL(bitmap_endwrite);
  1261. static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
  1262. int degraded)
  1263. {
  1264. bitmap_counter_t *bmc;
  1265. int rv;
  1266. if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
  1267. *blocks = 1024;
  1268. return 1; /* always resync if no bitmap */
  1269. }
  1270. spin_lock_irq(&bitmap->lock);
  1271. bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
  1272. rv = 0;
  1273. if (bmc) {
  1274. /* locked */
  1275. if (RESYNC(*bmc))
  1276. rv = 1;
  1277. else if (NEEDED(*bmc)) {
  1278. rv = 1;
  1279. if (!degraded) { /* don't set/clear bits if degraded */
  1280. *bmc |= RESYNC_MASK;
  1281. *bmc &= ~NEEDED_MASK;
  1282. }
  1283. }
  1284. }
  1285. spin_unlock_irq(&bitmap->lock);
  1286. bitmap->allclean = 0;
  1287. return rv;
  1288. }
  1289. int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
  1290. int degraded)
  1291. {
  1292. /* bitmap_start_sync must always report on multiples of whole
  1293. * pages, otherwise resync (which is very PAGE_SIZE based) will
  1294. * get confused.
  1295. * So call __bitmap_start_sync repeatedly (if needed) until
  1296. * at least PAGE_SIZE>>9 blocks are covered.
  1297. * Return the 'or' of the result.
  1298. */
  1299. int rv = 0;
  1300. sector_t blocks1;
  1301. *blocks = 0;
  1302. while (*blocks < (PAGE_SIZE>>9)) {
  1303. rv |= __bitmap_start_sync(bitmap, offset,
  1304. &blocks1, degraded);
  1305. offset += blocks1;
  1306. *blocks += blocks1;
  1307. }
  1308. return rv;
  1309. }
  1310. EXPORT_SYMBOL(bitmap_start_sync);
  1311. void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
  1312. {
  1313. bitmap_counter_t *bmc;
  1314. unsigned long flags;
  1315. if (bitmap == NULL) {
  1316. *blocks = 1024;
  1317. return;
  1318. }
  1319. spin_lock_irqsave(&bitmap->lock, flags);
  1320. bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
  1321. if (bmc == NULL)
  1322. goto unlock;
  1323. /* locked */
  1324. if (RESYNC(*bmc)) {
  1325. *bmc &= ~RESYNC_MASK;
  1326. if (!NEEDED(*bmc) && aborted)
  1327. *bmc |= NEEDED_MASK;
  1328. else {
  1329. if (*bmc <= 2)
  1330. set_page_attr(bitmap,
  1331. filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
  1332. BITMAP_PAGE_CLEAN);
  1333. }
  1334. }
  1335. unlock:
  1336. spin_unlock_irqrestore(&bitmap->lock, flags);
  1337. bitmap->allclean = 0;
  1338. }
  1339. EXPORT_SYMBOL(bitmap_end_sync);
  1340. void bitmap_close_sync(struct bitmap *bitmap)
  1341. {
  1342. /* Sync has finished, and any bitmap chunks that weren't synced
  1343. * properly have been aborted. It remains to us to clear the
  1344. * RESYNC bit wherever it is still on
  1345. */
  1346. sector_t sector = 0;
  1347. sector_t blocks;
  1348. if (!bitmap)
  1349. return;
  1350. while (sector < bitmap->mddev->resync_max_sectors) {
  1351. bitmap_end_sync(bitmap, sector, &blocks, 0);
  1352. sector += blocks;
  1353. }
  1354. }
  1355. EXPORT_SYMBOL(bitmap_close_sync);
  1356. void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
  1357. {
  1358. sector_t s = 0;
  1359. sector_t blocks;
  1360. if (!bitmap)
  1361. return;
  1362. if (sector == 0) {
  1363. bitmap->last_end_sync = jiffies;
  1364. return;
  1365. }
  1366. if (time_before(jiffies, (bitmap->last_end_sync
  1367. + bitmap->mddev->bitmap_info.daemon_sleep)))
  1368. return;
  1369. wait_event(bitmap->mddev->recovery_wait,
  1370. atomic_read(&bitmap->mddev->recovery_active) == 0);
  1371. bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
  1372. set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
  1373. sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
  1374. s = 0;
  1375. while (s < sector && s < bitmap->mddev->resync_max_sectors) {
  1376. bitmap_end_sync(bitmap, s, &blocks, 0);
  1377. s += blocks;
  1378. }
  1379. bitmap->last_end_sync = jiffies;
  1380. sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
  1381. }
  1382. EXPORT_SYMBOL(bitmap_cond_end_sync);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
        /* For each chunk covered by any of these sectors, set the
         * counter to 1 and set resync_needed.  They should all
         * be 0 at this point
         */
        sector_t secs;
        bitmap_counter_t *bmc;

        spin_lock_irq(&bitmap->lock);
        bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
        if (!bmc) {
                spin_unlock_irq(&bitmap->lock);
                return;
        }
        if (!*bmc) {
                struct page *page;
                *bmc = 1 | (needed ? NEEDED_MASK : 0);
                bitmap_count_page(bitmap, offset, 1);
                page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
                set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
        }
        spin_unlock_irq(&bitmap->lock);
        bitmap->allclean = 0;
}

/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
        unsigned long chunk;

        for (chunk = s; chunk <= e; chunk++) {
                sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
                bitmap_set_memory_bits(bitmap, sec, 1);
                bitmap_file_set_bit(bitmap, sec);
                if (sec < bitmap->mddev->recovery_cp)
                        /* We are asserting that the array is dirty,
                         * so move the recovery_cp address back so
                         * that it is obvious that it is dirty
                         */
                        bitmap->mddev->recovery_cp = sec;
        }
}

/*
 * flush out any pending updates
 */
void bitmap_flush(mddev_t *mddev)
{
        struct bitmap *bitmap = mddev->bitmap;
        long sleep;

        if (!bitmap) /* there was no bitmap */
                return;

        /* run the daemon_work three times to ensure everything is flushed
         * that can be
         */
        sleep = mddev->bitmap_info.daemon_sleep * 2;
        bitmap->daemon_lastrun -= sleep;
        bitmap_daemon_work(mddev);
        bitmap->daemon_lastrun -= sleep;
        bitmap_daemon_work(mddev);
        bitmap->daemon_lastrun -= sleep;
        bitmap_daemon_work(mddev);
        bitmap_update_sb(bitmap);
}

/*
 * free memory that was allocated
 */
static void bitmap_free(struct bitmap *bitmap)
{
        unsigned long k, pages;
        struct bitmap_page *bp;

        if (!bitmap) /* there was no bitmap */
                return;

        /* release the bitmap file and kill the daemon */
        bitmap_file_put(bitmap);

        bp = bitmap->bp;
        pages = bitmap->pages;

        /* free all allocated memory */
        if (bp) /* deallocate the page memory */
                for (k = 0; k < pages; k++)
                        if (bp[k].map && !bp[k].hijacked)
                                kfree(bp[k].map);
        kfree(bp);
        kfree(bitmap);
}

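/*
 * Tear down the bitmap for an array: detach it from the mddev under
 * bitmap_info.mutex, restore the md thread's timeout so it no longer
 * wakes for bitmap work, drop the sysfs 'can_clear' reference and free
 * all bitmap memory.
 */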
void bitmap_destroy(mddev_t *mddev)
{
        struct bitmap *bitmap = mddev->bitmap;

        if (!bitmap) /* there was no bitmap */
                return;

        mutex_lock(&mddev->bitmap_info.mutex);
        mddev->bitmap = NULL; /* disconnect from the md device */
        mutex_unlock(&mddev->bitmap_info.mutex);
        if (mddev->thread)
                mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

        if (bitmap->sysfs_can_clear)
                sysfs_put(bitmap->sysfs_can_clear);

        bitmap_free(bitmap);
}

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
int bitmap_create(mddev_t *mddev)
{
        struct bitmap *bitmap;
        sector_t blocks = mddev->resync_max_sectors;
        unsigned long chunks;
        unsigned long pages;
        struct file *file = mddev->bitmap_info.file;
        int err;
        struct sysfs_dirent *bm = NULL;

        BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

        if (!file
            && !mddev->bitmap_info.offset
            && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */
                return 0;

        BUG_ON(file && mddev->bitmap_info.offset);
        BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log);

        bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
        if (!bitmap)
                return -ENOMEM;

        spin_lock_init(&bitmap->lock);
        atomic_set(&bitmap->pending_writes, 0);
        init_waitqueue_head(&bitmap->write_wait);
        init_waitqueue_head(&bitmap->overflow_wait);
        init_waitqueue_head(&bitmap->behind_wait);

        bitmap->mddev = mddev;

        if (mddev->kobj.sd)
                bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
        if (bm) {
                bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
                sysfs_put(bm);
        } else
                bitmap->sysfs_can_clear = NULL;

        bitmap->file = file;
        if (file) {
                get_file(file);
                /* As future accesses to this file will use bmap,
                 * and bypass the page cache, we must sync the file
                 * first.
                 */
                vfs_fsync(file, 1);
        }
        /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
        if (!mddev->bitmap_info.external)
                err = bitmap_read_sb(bitmap);
        else {
                err = 0;
                if (mddev->bitmap_info.chunksize == 0 ||
                    mddev->bitmap_info.daemon_sleep == 0)
                        /* chunksize and time_base need to be
                         * set first. */
                        err = -EINVAL;
        }
        if (err)
                goto error;

        bitmap->daemon_lastrun = jiffies;
        bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);

        /* now that chunksize and chunkshift are set, we can use these macros */
        chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
                        CHUNK_BLOCK_SHIFT(bitmap);
        pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;

        BUG_ON(!pages);

        bitmap->chunks = chunks;
        bitmap->pages = pages;
        bitmap->missing_pages = pages;
        bitmap->counter_bits = COUNTER_BITS;

        bitmap->syncchunk = ~0UL;

#ifdef INJECT_FATAL_FAULT_1
        bitmap->bp = NULL;
#else
        bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
#endif
        err = -ENOMEM;
        if (!bitmap->bp)
                goto error;

        printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
                pages, bmname(bitmap));

        mddev->bitmap = bitmap;

        return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;

 error:
        bitmap_free(bitmap);
        return err;
}

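/*
 * Load the in-memory state of a bitmap that bitmap_create() has set up.
 * Any stale resync state is cleared first, then the counters are seeded
 * either from the DM dirty log (if one is configured) or from the
 * on-disk bitmap, after which the bitmap daemon is started and the
 * superblock is written out.
 */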
int bitmap_load(mddev_t *mddev)
{
        int err = 0;
        sector_t sector = 0;
        struct bitmap *bitmap = mddev->bitmap;

        if (!bitmap)
                goto out;

        /* Clear out old bitmap info first:  Either there is none, or we
         * are resuming after someone else has possibly changed things,
         * so we should forget old cached info.
         * All chunks should be clean, but some might need_sync.
         */
        while (sector < mddev->resync_max_sectors) {
                sector_t blocks;
                bitmap_start_sync(bitmap, sector, &blocks, 0);
                sector += blocks;
        }
        bitmap_close_sync(bitmap);

        if (mddev->bitmap_info.log) {
                unsigned long i;
                struct dm_dirty_log *log = mddev->bitmap_info.log;
                for (i = 0; i < bitmap->chunks; i++)
                        if (!log->type->in_sync(log, i, 1))
                                bitmap_set_memory_bits(bitmap,
                                                       (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
                                                       1);
        } else {
                sector_t start = 0;
                if (mddev->degraded == 0
                    || bitmap->events_cleared == mddev->events)
                        /* no need to keep dirty bits to optimise a
                         * re-add of a missing device */
                        start = mddev->recovery_cp;

                err = bitmap_init_from_disk(bitmap, start);
        }
        if (err)
                goto out;

        mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
        md_wakeup_thread(mddev->thread);

        bitmap_update_sb(bitmap);

        if (bitmap->flags & BITMAP_WRITE_ERROR)
                err = -EIO;
out:
        return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);

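/*
 * sysfs interface: the attributes below appear in the per-array
 * "bitmap" directory (see md_bitmap_group at the end of this file).
 *
 * 'location' reports where the bitmap lives: "file", a signed offset
 * for an internal bitmap, or "none".  Writing "none" removes an
 * existing bitmap; writing a signed offset creates an internal bitmap
 * at that location ("file:..." is not yet supported).
 */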
static ssize_t
location_show(mddev_t *mddev, char *page)
{
        ssize_t len;
        if (mddev->bitmap_info.file)
                len = sprintf(page, "file");
        else if (mddev->bitmap_info.offset)
                len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
        else
                len = sprintf(page, "none");
        len += sprintf(page+len, "\n");
        return len;
}

static ssize_t
location_store(mddev_t *mddev, const char *buf, size_t len)
{
        if (mddev->pers) {
                if (!mddev->pers->quiesce)
                        return -EBUSY;
                if (mddev->recovery || mddev->sync_thread)
                        return -EBUSY;
        }

        if (mddev->bitmap || mddev->bitmap_info.file ||
            mddev->bitmap_info.offset) {
                /* bitmap already configured.  Only option is to clear it */
                if (strncmp(buf, "none", 4) != 0)
                        return -EBUSY;
                if (mddev->pers) {
                        mddev->pers->quiesce(mddev, 1);
                        bitmap_destroy(mddev);
                        mddev->pers->quiesce(mddev, 0);
                }
                mddev->bitmap_info.offset = 0;
                if (mddev->bitmap_info.file) {
                        struct file *f = mddev->bitmap_info.file;
                        mddev->bitmap_info.file = NULL;
                        restore_bitmap_write_access(f);
                        fput(f);
                }
        } else {
                /* No bitmap, OK to set a location */
                long long offset;
                if (strncmp(buf, "none", 4) == 0)
                        /* nothing to be done */;
                else if (strncmp(buf, "file:", 5) == 0) {
                        /* Not supported yet */
                        return -EINVAL;
                } else {
                        int rv;
                        if (buf[0] == '+')
                                rv = strict_strtoll(buf+1, 10, &offset);
                        else
                                rv = strict_strtoll(buf, 10, &offset);
                        if (rv)
                                return rv;
                        if (offset == 0)
                                return -EINVAL;
                        if (mddev->bitmap_info.external == 0 &&
                            mddev->major_version == 0 &&
                            offset != mddev->bitmap_info.default_offset)
                                return -EINVAL;
                        mddev->bitmap_info.offset = offset;
                        if (mddev->pers) {
                                mddev->pers->quiesce(mddev, 1);
                                rv = bitmap_create(mddev);
                                if (rv) {
                                        bitmap_destroy(mddev);
                                        mddev->bitmap_info.offset = 0;
                                }
                                mddev->pers->quiesce(mddev, 0);
                                if (rv)
                                        return rv;
                        }
                }
        }
        if (!mddev->external) {
                /* Ensure new bitmap info is stored in
                 * metadata promptly.
                 */
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
                md_wakeup_thread(mddev->thread);
        }
        return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

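/*
 * 'time_base' is the bitmap daemon delay (daemon_sleep), read and written
 * in seconds with up to four decimal places.  For example (assuming
 * HZ == 1000), writing "5.37" is parsed by
 * strict_strtoul_scaled(buf, &timeout, 4) as 53700 units of 1/10000s,
 * and timeout * HZ / 10000 then stores 5370 jiffies.
 */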
static ssize_t
timeout_show(mddev_t *mddev, char *page)
{
        ssize_t len;
        unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
        unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

        len = sprintf(page, "%lu", secs);
        if (jifs)
                len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
        len += sprintf(page+len, "\n");
        return len;
}

static ssize_t
timeout_store(mddev_t *mddev, const char *buf, size_t len)
{
        /* timeout can be set at any time */
        unsigned long timeout;
        int rv = strict_strtoul_scaled(buf, &timeout, 4);
        if (rv)
                return rv;

        /* just to make sure we don't overflow... */
        if (timeout >= LONG_MAX / HZ)
                return -EINVAL;

        timeout = timeout * HZ / 10000;

        if (timeout >= MAX_SCHEDULE_TIMEOUT)
                timeout = MAX_SCHEDULE_TIMEOUT-1;
        if (timeout < 1)
                timeout = 1;
        mddev->bitmap_info.daemon_sleep = timeout;
        if (mddev->thread) {
                /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
                 * the bitmap is all clean and we don't need to
                 * adjust the timeout right now
                 */
                if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
                        mddev->thread->timeout = timeout;
                        md_wakeup_thread(mddev->thread);
                }
        }
        return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);

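/*
 * 'backlog' is the maximum number of write-behind writes that may be
 * outstanding at once (max_write_behind); values above COUNTER_MAX are
 * rejected.
 */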
static ssize_t
backlog_show(mddev_t *mddev, char *page)
{
        return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(mddev_t *mddev, const char *buf, size_t len)
{
        unsigned long backlog;
        int rv = strict_strtoul(buf, 10, &backlog);
        if (rv)
                return rv;
        if (backlog > COUNTER_MAX)
                return -EINVAL;
        mddev->bitmap_info.max_write_behind = backlog;
        return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

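/*
 * 'chunksize' is the bitmap chunk size in bytes; it must be a power of
 * two of at least 512 and can only be set while no bitmap is active.
 */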
static ssize_t
chunksize_show(mddev_t *mddev, char *page)
{
        return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(mddev_t *mddev, const char *buf, size_t len)
{
        /* Can only be changed when no bitmap is active */
        int rv;
        unsigned long csize;
        if (mddev->bitmap)
                return -EBUSY;
        rv = strict_strtoul(buf, 10, &csize);
        if (rv)
                return rv;
        if (csize < 512 ||
            !is_power_of_2(csize))
                return -EINVAL;
        mddev->bitmap_info.chunksize = csize;
        return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

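/*
 * 'metadata' selects between "internal" and "external" bitmap metadata
 * (bitmap_info.external).  With external metadata the kernel does not
 * read the bitmap superblock itself (see bitmap_create()).  It can only
 * be changed before a bitmap is configured.
 */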
static ssize_t metadata_show(mddev_t *mddev, char *page)
{
        return sprintf(page, "%s\n", (mddev->bitmap_info.external
                                      ? "external" : "internal"));
}

static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
        if (mddev->bitmap ||
            mddev->bitmap_info.file ||
            mddev->bitmap_info.offset)
                return -EBUSY;
        if (strncmp(buf, "external", 8) == 0)
                mddev->bitmap_info.external = 1;
        else if (strncmp(buf, "internal", 8) == 0)
                mddev->bitmap_info.external = 0;
        else
                return -EINVAL;
        return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

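/*
 * 'can_clear' reports whether bits in the bitmap may be cleared ("true"
 * unless need_sync is set).  Writing "false" sets need_sync so no bits
 * are cleared; writing "true" clears need_sync again, but only if the
 * array is not degraded.
 */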
static ssize_t can_clear_show(mddev_t *mddev, char *page)
{
        int len;
        if (mddev->bitmap)
                len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
                                             "false" : "true"));
        else
                len = sprintf(page, "\n");
        return len;
}

static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
{
        if (mddev->bitmap == NULL)
                return -ENOENT;
        if (strncmp(buf, "false", 5) == 0)
                mddev->bitmap->need_sync = 1;
        else if (strncmp(buf, "true", 4) == 0) {
                if (mddev->degraded)
                        return -EBUSY;
                mddev->bitmap->need_sync = 0;
        } else
                return -EINVAL;
        return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

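/*
 * 'max_backlog_used' reports the highest number of write-behind writes
 * that have been in flight at once (behind_writes_used); writing any
 * value resets the counter to zero.
 */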
static ssize_t
behind_writes_used_show(mddev_t *mddev, char *page)
{
        if (mddev->bitmap == NULL)
                return sprintf(page, "0\n");
        return sprintf(page, "%lu\n",
                       mddev->bitmap->behind_writes_used);
}

static ssize_t
behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len)
{
        if (mddev->bitmap)
                mddev->bitmap->behind_writes_used = 0;
        return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

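/* All of the bitmap attributes above, exported as the "bitmap" sysfs group. */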
static struct attribute *md_bitmap_attrs[] = {
        &bitmap_location.attr,
        &bitmap_timeout.attr,
        &bitmap_backlog.attr,
        &bitmap_chunksize.attr,
        &bitmap_metadata.attr,
        &bitmap_can_clear.attr,
        &max_backlog_used.attr,
        NULL
};
struct attribute_group md_bitmap_group = {
        .name = "bitmap",
        .attrs = md_bitmap_attrs,
};