/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include "md.h"
#include "bitmap.h"

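/*
 * Note on the counter format (illustrative; the values are taken from the
 * companion bitmap.h of this kernel generation, so treat them as assumptions
 * rather than something shown in this file): each chunk has a 16-bit
 * bitmap_counter_t.  The top bit is NEEDED (chunk must be resynced), the
 * next bit is RESYNC (resync in progress), and the low 14 bits count
 * in-flight writes, so COUNTER_MAX is 0x3fff (16383) -- which matches the
 * "write-behind limit out of range (0 - 16383)" check in bitmap_read_sb()
 * below.
 */
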
static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * just a placeholder - calls kzalloc for bitmap pages
 */
static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
{
	unsigned char *page;

	page = kzalloc(PAGE_SIZE, GFP_NOIO);
	if (!page)
		printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
	else
		pr_debug("%s: bitmap_alloc_page: allocated page at %p\n",
			 bmname(bitmap), page);
	return page;
}

/*
 * for now just a placeholder -- just calls kfree for bitmap pages
 */
static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
{
	pr_debug("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
	kfree(page);
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int bitmap_checkpage(struct bitmap *bitmap,
			    unsigned long page, int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	mappage = bitmap_alloc_page(bitmap);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("%s: bitmap map page allocation failed, hijacking\n",
			 bmname(bitmap));
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		bitmap_free_page(bitmap, mappage);
		return 0;
	} else {
		/* no page was in place and we have one, so install it */
		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}

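/*
 * Illustrative summary (assumes only what the function above shows): with
 * 'create' set, bitmap_checkpage() has three outcomes -- the page is already
 * present or hijacked (return 0 untouched); the allocation succeeds and the
 * fresh page is installed in bp[page].map; or the allocation fails and the
 * slot is marked hijacked, so the pointer word itself is later used as a
 * pair of counters (see bitmap_get_counter() below).
 */
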
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		bitmap_free_page(bitmap, ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
				 struct page *page,
				 unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;
	int did_alloc = 0;

	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return ERR_PTR(-ENOMEM);
		did_alloc = 1;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, READ, true)) {
			page->index = index;
			attach_page_buffers(page, NULL); /* so that free_buffer will
							  * quietly no-op */
			return page;
		}
	}
	if (did_alloc)
		put_page(page);
	return ERR_PTR(-EIO);
}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_continue_rcu.
	 */
	struct list_head *pos;
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		pos = &mddev->disks;
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
		pos = &rdev->same_set;
	}
	list_for_each_continue_rcu(pos, &mddev->disks) {
		rdev = list_entry(pos, struct md_rdev, same_set);
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev = NULL;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;

	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == bitmap->file_pages-1)
			size = roundup(bitmap->last_page_size,
				       bdev_logical_block_size(bdev));
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			       + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA BITMAP METADATA */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs into metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs into bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs into data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait)
		md_super_wait(mddev);
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			bitmap->flags |= BITMAP_WRITE_ERROR;
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(WRITE | REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (bitmap->flags & BITMAP_WRITE_ERROR)
		bitmap_file_kick(bitmap);
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;
	unsigned long flags;

	if (!uptodate) {
		spin_lock_irqsave(&bitmap->lock, flags);
		bitmap->flags |= BITMAP_WRITE_ERROR;
		spin_unlock_irqrestore(&bitmap->lock, flags);
	}
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void free_buffers(struct page *page)
{
	struct buffer_head *bh = page_buffers(page);

	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	__clear_page_buffers(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static struct page *read_page(struct file *file, unsigned long index,
			      struct bitmap *bitmap,
			      unsigned long count)
{
	struct page *page = NULL;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct buffer_head *bh;
	sector_t block;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	page = alloc_page(GFP_KERNEL);
	if (!page)
		page = ERR_PTR(-ENOMEM);
	if (IS_ERR(page))
		goto out;

	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
	if (!bh) {
		put_page(page);
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
				free_buffers(page);
				page = ERR_PTR(-EINVAL);
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(READ, bh);
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (bitmap->flags & BITMAP_WRITE_ERROR) {
		free_buffers(page);
		page = ERR_PTR(-EIO);
	}
out:
	if (IS_ERR(page))
		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       PTR_ERR(page));
	return page;
}

/*
 * bitmap file superblock operations
 */

/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long flags;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	spin_lock_irqsave(&bitmap->lock, flags);
	if (!bitmap->sb_page) { /* no superblock */
		spin_unlock_irqrestore(&bitmap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);
	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	sb->state = cpu_to_le32(bitmap->flags);
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	kunmap_atomic(sb, KM_USER0);
	write_page(bitmap, bitmap->sb_page, 1);
}

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->sb_page)
		return;
	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
	       *(__u32 *)(sb->uuid+0),
	       *(__u32 *)(sb->uuid+4),
	       *(__u32 *)(sb->uuid+8),
	       *(__u32 *)(sb->uuid+12));
	printk(KERN_DEBUG "        events: %llu\n",
	       (unsigned long long) le64_to_cpu(sb->events));
	printk(KERN_DEBUG "events cleared: %llu\n",
	       (unsigned long long) le64_to_cpu(sb->events_cleared));
	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	printk(KERN_DEBUG "     sync size: %llu KB\n",
	       (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb, KM_USER0);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	int err = -EINVAL;

	bitmap->sb_page = alloc_page(GFP_KERNEL);
	if (IS_ERR(bitmap->sb_page)) {
		err = PTR_ERR(bitmap->sb_page);
		bitmap->sb_page = NULL;
		return err;
	}
	bitmap->sb_page->index = 0;

	sb = kmap_atomic(bitmap->sb_page, KM_USER0);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb, KM_USER0);
		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep ||
	    (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	bitmap->flags |= BITMAP_STALE;
	sb->state |= cpu_to_le32(BITMAP_STALE);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);

	bitmap->flags |= BITMAP_HOSTENDIAN;
	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);

	kunmap_atomic(sb, KM_USER0);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int err = -EINVAL;

	/* page 0 is the superblock, read it... */
	if (bitmap->file) {
		loff_t isize = i_size_read(bitmap->file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
	} else {
		bitmap->sb_page = read_sb_page(bitmap->mddev,
					       bitmap->mddev->bitmap_info.offset,
					       NULL,
					       0, sizeof(bitmap_super_t));
	}
	if (IS_ERR(bitmap->sb_page)) {
		err = PTR_ERR(bitmap->sb_page);
		bitmap->sb_page = NULL;
		return err;
	}

	sb = kmap_atomic(bitmap->sb_page, KM_USER0);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
		       bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (!bitmap->mddev->persistent)
		goto success;

	/*
	 * if we have a persistent array superblock, compare the
	 * bitmap's UUID and event counter to the mddev's
	 */
	if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
		printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n",
		       bmname(bitmap));
		goto out;
	}
	events = le64_to_cpu(sb->events);
	if (events < bitmap->mddev->events) {
		printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) "
		       "-- forcing full recovery\n", bmname(bitmap), events,
		       (unsigned long long) bitmap->mddev->events);
		sb->state |= cpu_to_le32(BITMAP_STALE);
	}
success:
	/* assign fields using values from superblock */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		bitmap->flags |= BITMAP_HOSTENDIAN;
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	if (bitmap->flags & BITMAP_STALE)
		bitmap->events_cleared = bitmap->mddev->events;
	err = 0;
out:
	kunmap_atomic(sb, KM_USER0);
	if (err)
		bitmap_print_sb(bitmap);
	return err;
}

enum bitmap_mask_op {
	MASK_SET,
	MASK_UNSET
};

/* record the state of the bitmap in the superblock.  Return the old value */
static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
			     enum bitmap_mask_op op)
{
	bitmap_super_t *sb;
	unsigned long flags;
	int old;

	spin_lock_irqsave(&bitmap->lock, flags);
	if (!bitmap->sb_page) { /* can't set the state */
		spin_unlock_irqrestore(&bitmap->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);
	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
	old = le32_to_cpu(sb->state) & bits;
	switch (op) {
	case MASK_SET:
		sb->state |= cpu_to_le32(bits);
		bitmap->flags |= bits;
		break;
	case MASK_UNSET:
		sb->state &= cpu_to_le32(~bits);
		bitmap->flags &= ~bits;
		break;
	default:
		BUG();
	}
	kunmap_atomic(sb, KM_USER0);
	return old;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */

/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
{
	if (!bitmap->mddev->bitmap_info.external)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
{
	if (!bitmap->mddev->bitmap_info.external)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}

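/*
 * Worked example (illustrative; assumes 4 KiB pages and a 256-byte
 * bitmap_super_t -- neither is shown in this file): an internal bitmap
 * skips the embedded superblock, so chunk 0 maps to absolute bit
 * 256*8 = 2048.  With PAGE_BITS = 32768, chunks 0..30719 land on file
 * page 0 at bit offsets 2048..32767, and chunk 30720 becomes bit 0 of
 * file page 1.  For an external bitmap there is no embedded superblock
 * and chunk i is simply bit i.
 */
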
/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 * this lookup is complicated by the fact that the bitmap sb might be exactly
 * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
 * 0 or page 1
 */
static inline struct page *filemap_get_page(struct bitmap *bitmap,
					    unsigned long chunk)
{
	if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
		return NULL;
	return bitmap->filemap[file_page_index(bitmap, chunk)
			       - file_page_index(bitmap, 0)];
}

static void bitmap_file_unmap(struct bitmap *bitmap)
{
	struct page **map, *sb_page;
	unsigned long *attr;
	int pages;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	map = bitmap->filemap;
	bitmap->filemap = NULL;
	attr = bitmap->filemap_attr;
	bitmap->filemap_attr = NULL;
	pages = bitmap->file_pages;
	bitmap->file_pages = 0;
	sb_page = bitmap->sb_page;
	bitmap->sb_page = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(attr);

	if (sb_page)
		free_buffers(sb_page);
}

static void bitmap_file_put(struct bitmap *bitmap)
{
	struct file *file;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	file = bitmap->file;
	bitmap->file = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	if (file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	bitmap_file_unmap(bitmap);

	if (file) {
		struct inode *inode = file->f_path.dentry->d_inode;
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
		bitmap_update_sb(bitmap);

		if (bitmap->file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = d_path(&bitmap->file->f_path, path,
					     PAGE_SIZE);

			printk(KERN_ALERT
			       "%s: kicking failed bitmap file %s from array!\n",
			       bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			printk(KERN_ALERT
			       "%s: disabling internal bitmap due to errors\n",
			       bmname(bitmap));
	}

	bitmap_file_put(bitmap);

	return;
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
				 enum bitmap_page_attr attr)
{
	__set_bit((page->index<<2) + attr, bitmap->filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
				   enum bitmap_page_attr attr)
{
	__clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
}

static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
					   enum bitmap_page_attr attr)
{
	return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
}

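/*
 * Illustrative note: filemap_attr packs four attribute bits per file page,
 * hence the (page->index << 2) + attr indexing above (and the matching
 * "4 bits per page" allocation in bitmap_init_from_disk() below).  Only
 * DIRTY (0), PENDING (1) and NEEDWRITE (2) are defined, so bit 3 of each
 * group is currently unused.
 */
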
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);

	if (!bitmap->filemap)
		return;

	page = filemap_get_page(bitmap, chunk);
	if (!page)
		return;
	bit = file_page_offset(bitmap, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page, KM_USER0);
	if (bitmap->flags & BITMAP_HOSTENDIAN)
		set_bit(bit, kaddr);
	else
		__set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr, KM_USER0);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
}

/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i, flags;
	int dirty, need_write;
	struct page *page;
	int wait = 0;

	if (!bitmap)
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->file_pages; i++) {
		spin_lock_irqsave(&bitmap->lock, flags);
		if (!bitmap->filemap) {
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return;
		}
		page = bitmap->filemap[i];
		dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
		need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
		clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
		clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
		if (dirty)
			wait = 1;
		spin_unlock_irqrestore(&bitmap->lock, flags);

		if (dirty || need_write)
			write_page(bitmap, page, 0);
	}
	if (wait) { /* if any writes were performed, we need to wait on them */
		if (bitmap->file)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
		else
			md_super_wait(bitmap->mddev);
	}
	if (bitmap->flags & BITMAP_WRITE_ERROR)
		bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(bitmap_unplug);

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);

/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit;
	struct page *page = NULL, *oldpage = NULL;
	unsigned long num_pages, bit_cnt = 0;
	struct file *file;
	unsigned long bytes, offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;

	chunks = bitmap->chunks;
	file = bitmap->file;

	BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);

	outofdate = bitmap->flags & BITMAP_STALE;
	if (outofdate)
		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
		       "recovery\n", bmname(bitmap));

	bytes = DIV_ROUND_UP(bitmap->chunks, 8);
	if (!bitmap->mddev->bitmap_info.external)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	if (file && i_size_read(file->f_mapping->host) < bytes) {
		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
		       bmname(bitmap),
		       (unsigned long) i_size_read(file->f_mapping->host),
		       bytes);
		goto err;
	}

	ret = -ENOMEM;

	bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!bitmap->filemap)
		goto err;

	/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
	bitmap->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!bitmap->filemap_attr)
		goto err;

	oldindex = ~0L;

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(bitmap, i);
		bit = file_page_offset(bitmap, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == num_pages-1)
				count = bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			if (index == 0 && bitmap->sb_page) {
				/*
				 * if we're here then the superblock page
				 * contains some bits (PAGE_SIZE != sizeof sb)
				 * we've already read it in, so just use it
				 */
				page = bitmap->sb_page;
				offset = sizeof(bitmap_super_t);
				if (!file)
					page = read_sb_page(
						bitmap->mddev,
						bitmap->mddev->bitmap_info.offset,
						page,
						index, count);
			} else if (file) {
				page = read_page(file, index, bitmap, count);
				offset = 0;
			} else {
				page = read_sb_page(bitmap->mddev,
						    bitmap->mddev->bitmap_info.offset,
						    NULL,
						    index, count);
				offset = 0;
			}
			if (IS_ERR(page)) { /* read error */
				ret = PTR_ERR(page);
				goto err;
			}

			oldindex = index;
			oldpage = page;

			bitmap->filemap[bitmap->file_pages++] = page;
			bitmap->last_page_size = count;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page, KM_USER0);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr, KM_USER0);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (bitmap->flags & BITMAP_WRITE_ERROR)
					goto err;
			}
		}
		paddr = kmap_atomic(page, KM_USER0);
		if (bitmap->flags & BITMAP_HOSTENDIAN)
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr, KM_USER0);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
				      >= start);
			bitmap_set_memory_bits(bitmap,
					       (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
					       needed);
			bit_cnt++;
		}
	}

	/* everything went OK */
	ret = 0;
	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);

	if (bit_cnt) { /* Kick recovery if any bits were set */
		set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
		md_wakeup_thread(bitmap->mddev->thread);
	}

	printk(KERN_INFO "%s: bitmap initialized from disk: "
	       "read %lu/%lu pages, set %lu of %lu bits\n",
	       bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);

	return 0;

 err:
	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
	       bmname(bitmap), ret);
	return ret;
}

void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	for (i = 0; i < bitmap->file_pages; i++)
		set_page_attr(bitmap, bitmap->filemap[i],
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
}

static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
{
	sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	bitmap_checkfree(bitmap, page);
}

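/*
 * Worked example (illustrative; assumes a 64 KiB bitmap chunk, 4 KiB pages
 * and the 16-bit counters described above): one chunk covers 128 512-byte
 * sectors, so CHUNK_BLOCK_SHIFT is 7 and an offset of 1,000,000 sectors
 * falls in chunk 7812.  A counter page holds 2048 two-byte counters
 * (PAGE_COUNTER_SHIFT = 11), so that chunk's counter lives on counter
 * page 3.
 */
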
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 * out to disk
 */
void bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long flags;
	struct page *page = NULL, *lastpage = NULL;
	sector_t blocks;
	void *paddr;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ bitmap->mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	bitmap->allclean = 1;

	spin_lock_irqsave(&bitmap->lock, flags);
	for (j = 0; j < bitmap->chunks; j++) {
		bitmap_counter_t *bmc;
		if (!bitmap->filemap)
			/* error or shutdown */
			break;

		page = filemap_get_page(bitmap, j);

		if (page != lastpage) {
			/* skip this page unless it's marked as needing cleaning */
			if (!test_page_attr(bitmap, page, BITMAP_PAGE_PENDING)) {
				int need_write = test_page_attr(bitmap, page,
								BITMAP_PAGE_NEEDWRITE);
				if (need_write)
					clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);

				spin_unlock_irqrestore(&bitmap->lock, flags);
				if (need_write)
					write_page(bitmap, page, 0);
				spin_lock_irqsave(&bitmap->lock, flags);
				j |= (PAGE_BITS - 1);
				continue;
			}

			/* grab the new page, sync and release the old */
			if (lastpage != NULL) {
				if (test_page_attr(bitmap, lastpage,
						   BITMAP_PAGE_NEEDWRITE)) {
					clear_page_attr(bitmap, lastpage,
							BITMAP_PAGE_NEEDWRITE);
					spin_unlock_irqrestore(&bitmap->lock, flags);
					write_page(bitmap, lastpage, 0);
				} else {
					set_page_attr(bitmap, lastpage,
						      BITMAP_PAGE_NEEDWRITE);
					bitmap->allclean = 0;
					spin_unlock_irqrestore(&bitmap->lock, flags);
				}
			} else
				spin_unlock_irqrestore(&bitmap->lock, flags);
			lastpage = page;

			/* We are possibly going to clear some bits, so make
			 * sure that events_cleared is up-to-date.
			 */
			if (bitmap->need_sync &&
			    bitmap->mddev->bitmap_info.external == 0) {
				bitmap_super_t *sb;
				bitmap->need_sync = 0;
				sb = kmap_atomic(bitmap->sb_page, KM_USER0);
				sb->events_cleared =
					cpu_to_le64(bitmap->events_cleared);
				kunmap_atomic(sb, KM_USER0);
				write_page(bitmap, bitmap->sb_page, 1);
			}
			spin_lock_irqsave(&bitmap->lock, flags);
			if (!bitmap->need_sync)
				clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
			else
				bitmap->allclean = 0;
		}
		bmc = bitmap_get_counter(bitmap,
					 (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
					 &blocks, 0);
		if (!bmc)
			j |= PAGE_COUNTER_MASK;
		else if (*bmc) {
			if (*bmc == 1 && !bitmap->need_sync) {
				/* we can clear the bit */
				*bmc = 0;
				bitmap_count_page(bitmap,
						  (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
						  -1);

				/* clear the bit */
				paddr = kmap_atomic(page, KM_USER0);
				if (bitmap->flags & BITMAP_HOSTENDIAN)
					clear_bit(file_page_offset(bitmap, j),
						  paddr);
				else
					__clear_bit_le(
						file_page_offset(bitmap,
								 j),
						paddr);
				kunmap_atomic(paddr, KM_USER0);
			} else if (*bmc <= 2) {
				*bmc = 1; /* maybe clear the bit next time */
				set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
				bitmap->allclean = 0;
			}
		}
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);

	/* now sync the final page */
	if (lastpage != NULL) {
		spin_lock_irqsave(&bitmap->lock, flags);
		if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
			clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
			spin_unlock_irqrestore(&bitmap->lock, flags);
			write_page(bitmap, lastpage, 0);
		} else {
			set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
			bitmap->allclean = 0;
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}
	}

 done:
	if (bitmap->allclean == 0)
		bitmap->mddev->thread->timeout =
			bitmap->mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}

static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = bitmap_checkpage(bitmap, page, create);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
					  PAGE_COUNTER_SHIFT - 1);
	else
		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return &((bitmap_counter_t *)
			 &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}

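/*
 * Illustrative note on the hijacked case above: when no counter page could
 * be allocated, the bp[page].map pointer word itself is treated as an array
 * of two bitmap_counter_t values, so the whole page's range is split
 * between just two counters.  That is why csize is computed with
 * PAGE_COUNTER_SHIFT - 1 for hijacked (or absent) pages: each of the two
 * counters stands in for half of the 2^PAGE_COUNTER_SHIFT chunks a real
 * page would track.
 */
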
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->lock);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->lock);
			io_schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			bitmap_file_set_bit(bitmap, offset);
			bitmap_count_page(bitmap, offset, 1);
			/* fall through */
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(bitmap_startwrite);

void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
		     int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}
	if (bitmap->mddev->degraded)
		/* Never clear bits or update events_cleared when degraded */
		success = 0;

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->lock, flags);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return;
		}

		if (success &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			set_page_attr(bitmap,
				      filemap_get_page(
					      bitmap,
					      offset >> CHUNK_BLOCK_SHIFT(bitmap)),
				      BITMAP_PAGE_PENDING);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(bitmap_endwrite);

static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;

	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->lock);
	return rv;
}

int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
		      int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * at least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
	sector_t blocks1;

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
EXPORT_SYMBOL(bitmap_start_sync);

void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;

	if (bitmap == NULL) {
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->lock, flags);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				set_page_attr(bitmap,
					      filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
					      BITMAP_PAGE_PENDING);
				bitmap->allclean = 0;
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->lock, flags);
}
EXPORT_SYMBOL(bitmap_end_sync);

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
	sector_t blocks;
	if (!bitmap)
		return;
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
}
EXPORT_SYMBOL(bitmap_close_sync);

void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
	sector_t s = 0;
	sector_t blocks;

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (time_before(jiffies, (bitmap->last_end_sync
				  + bitmap->mddev->bitmap_info.daemon_sleep)))
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

	bitmap->mddev->curr_resync_completed = sector;
	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
	sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
EXPORT_SYMBOL(bitmap_cond_end_sync);

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 1 and set resync_needed.  They should all
	 * be 0 at this point.
	 */
	sector_t secs;
	bitmap_counter_t *bmc;

	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
	if (!bmc) {
		spin_unlock_irq(&bitmap->lock);
		return;
	}
	if (!*bmc) {
		struct page *page;
		*bmc = 1 | (needed ? NEEDED_MASK : 0);
		bitmap_count_page(bitmap, offset, 1);
		page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
		set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
	spin_unlock_irq(&bitmap->lock);
}
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
		sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
		bitmap_set_memory_bits(bitmap, sec, 1);
		bitmap_file_set_bit(bitmap, sec);
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty.
			 */
			bitmap->mddev->recovery_cp = sec;
	}
}
/*
 * flush out any pending updates
 */
void bitmap_flush(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	long sleep;

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three times to ensure everything is flushed
	 * that can be
	 */
	sleep = mddev->bitmap_info.daemon_sleep * 2;
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap_update_sb(bitmap);
}
/*
 * free memory that was allocated
 */
static void bitmap_free(struct bitmap *bitmap)
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

	/* release the bitmap file and kill the daemon */
	bitmap_file_put(bitmap);

	bp = bitmap->bp;
	pages = bitmap->pages;

	/* free all allocated memory */
	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}
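/*
 * Tear the bitmap down: detach it from the md device under
 * bitmap_info.mutex, restore the daemon thread's default timeout,
 * drop the sysfs handle and free all memory.
 */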
void bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	mutex_lock(&mddev->bitmap_info.mutex);
	mddev->bitmap = NULL; /* disconnect from the md device */
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

	bitmap_free(bitmap);
}
/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
int bitmap_create(struct mddev *mddev)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	unsigned long chunks;
	unsigned long pages;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct sysfs_dirent *bm = NULL;

	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	if (!file
	    && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
		return 0;

	BUG_ON(file && mddev->bitmap_info.offset);

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	spin_lock_init(&bitmap->lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = bitmap_new_disk_sb(bitmap);
		else
			err = bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);

	/* now that chunksize and chunkshift are set, we can use these macros */
	chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
			CHUNK_BLOCK_SHIFT(bitmap);
	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;

	BUG_ON(!pages);

	bitmap->chunks = chunks;
	bitmap->pages = pages;
	bitmap->missing_pages = pages;

	bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);

	err = -ENOMEM;
	if (!bitmap->bp)
		goto error;

	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
	       pages, bmname(bitmap));

	mddev->bitmap = bitmap;

	return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;

 error:
	bitmap_free(bitmap);
	return err;
}
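/*
 * Load an existing bitmap into memory after bitmap_create().  Stale
 * in-memory state is cleared first, then the bits are re-read from
 * storage (skipping everything below recovery_cp when the dirty bits
 * are not needed) and the md thread is told to run the bitmap daemon
 * at daemon_sleep intervals.
 */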
int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	/* Clear out old bitmap info first: Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	err = bitmap_init_from_disk(bitmap, start);
	if (err)
		goto out;

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (bitmap->flags & BITMAP_WRITE_ERROR)
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);
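/*
 * sysfs attribute "bitmap/location": shows "file", "none", or the
 * signed sector offset of an internal bitmap.  Writing "none" removes
 * the current bitmap; writing a signed offset creates an internal one
 * ("file:..." is not supported yet).
 */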
static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}
static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0)
			return -EBUSY;
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			restore_bitmap_write_access(f);
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			return -EINVAL;
		} else {
			int rv;
			if (buf[0] == '+')
				rv = strict_strtoll(buf+1, 10, &offset);
			else
				rv = strict_strtoll(buf, 10, &offset);
			if (rv)
				return rv;
			if (offset == 0)
				return -EINVAL;
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset)
				return -EINVAL;
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				mddev->pers->quiesce(mddev, 1);
				rv = bitmap_create(mddev);
				if (rv) {
					bitmap_destroy(mddev);
					mddev->bitmap_info.offset = 0;
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv)
					return rv;
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
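/*
 * sysfs attribute "bitmap/time_base": how long the bitmap daemon may
 * sleep between scans, shown in seconds (with milliseconds when not a
 * whole number).  The store side accepts a decimal value with up to
 * four fractional digits and converts it to jiffies.
 */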
static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
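/*
 * sysfs attribute "bitmap/backlog": the maximum number of write-behind
 * writes that may be in flight at once; must not exceed COUNTER_MAX.
 */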
static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = strict_strtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
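/*
 * sysfs attribute "bitmap/chunksize": bytes of the array covered by
 * one bitmap bit.  It can only be set while no bitmap is active, and
 * must be a power of two of at least 512.
 */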
static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = strict_strtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
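/*
 * sysfs attribute "bitmap/metadata": "internal" if md manages the
 * bitmap superblock itself, "external" if user-space does.  Only
 * changeable before a bitmap is configured.
 */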
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if (strncmp(buf, "internal", 8) == 0)
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
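/*
 * sysfs attribute "bitmap/can_clear": whether bits may be cleared once
 * their counters drop to zero.  Writing "false" sets need_sync;
 * writing "true" clears it, but is refused while the array is
 * degraded.
 */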
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
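/*
 * sysfs attribute "bitmap/max_backlog_used": the largest number of
 * write-behind writes seen in flight at once; writing anything resets
 * the count to zero.
 */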
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	if (mddev->bitmap == NULL)
		return sprintf(page, "0\n");
	return sprintf(page, "%lu\n",
		       mddev->bitmap->behind_writes_used);
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);
static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};