drbd_bitmap.c

  1. /*
  2. drbd_bitmap.c
  3. This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
  4. Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
  5. Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  6. Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
  7. drbd is free software; you can redistribute it and/or modify
  8. it under the terms of the GNU General Public License as published by
  9. the Free Software Foundation; either version 2, or (at your option)
  10. any later version.
  11. drbd is distributed in the hope that it will be useful,
  12. but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. GNU General Public License for more details.
  15. You should have received a copy of the GNU General Public License
  16. along with drbd; see the file COPYING. If not, write to
  17. the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  18. */
  19. #include <linux/bitops.h>
  20. #include <linux/vmalloc.h>
  21. #include <linux/string.h>
  22. #include <linux/drbd.h>
  23. #include <linux/slab.h>
  24. #include <asm/kmap_types.h>
  25. #include <asm-generic/bitops/le.h>
  26. #include "drbd_int.h"
  27. /* OPAQUE outside this file!
  28. * interface defined in drbd_int.h
  29. * convention:
  30. * function name drbd_bm_... => used elsewhere, "public".
  31. * function name bm_... => internal to implementation, "private".
  32. */
  33. /*
  34. * LIMITATIONS:
  35. * We want to support >= peta byte of backend storage, while for now still using
  36. * a granularity of one bit per 4KiB of storage.
  37. * 1 << 50 bytes backend storage (1 PiB)
  38. * 1 << (50 - 12) bits needed
  39. * 38 --> we need u64 to index and count bits
  40. * 1 << (38 - 3) bitmap bytes needed
  41. * 35 --> we still need u64 to index and count bytes
  42. * (that's 32 GiB of bitmap for 1 PiB storage)
  43. * 1 << (35 - 2) 32bit longs needed
  44. * 33 --> we'd even need u64 to index and count 32bit long words.
  45. * 1 << (35 - 3) 64bit longs needed
  46. * 32 --> we could get away with a 32bit unsigned int to index and count
  47. * 64bit long words, but I rather stay with unsigned long for now.
  48. * We probably should neither count nor point to bytes or long words
  49. * directly, but either by bitnumber, or by page index and offset.
  50. * 1 << (35 - 12)
  51. * 23 --> we need that many 4KiB pages of bitmap.
  52. * 1 << (23 + 3) --> on a 64bit arch,
  53. * we need 64 MiB to store the array of page pointers.
  54. *
  55. * Because I'm lazy, and because the resulting patch was too large, too ugly
  56. * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
  57. * (1 << 32) bits * 4k storage.
  58. *
  59. * bitmap storage and IO:
  60. * Bitmap is stored little endian on disk, and is kept little endian in
  61. * core memory. Currently we still hold the full bitmap in core as long
  62. * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
  63. * seems excessive.
  64. *
  65. * We plan to reduce the number of in-core bitmap pages by paging them in
  66. * and out against their on-disk location as necessary, but need to make
  67. * sure we don't cause too much meta data IO, and must not deadlock in
  68. * tight memory situations. This needs some more work.
  69. */
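/*
 * Editor's illustration (not part of the driver): the same arithmetic as
 * above for a more common 4 TiB backing device, assuming 4 KiB pages and
 * 64bit page pointers.
 */
#if 0	/* illustration only */
	u64 storage   = 1ULL << 42;                    /* 4 TiB backend storage      */
	u64 bits      = storage >> 12;                 /* 1 << 30 bits (one per 4k)  */
	u64 bytes     = bits >> 3;                     /* 128 MiB of bitmap          */
	u64 pages     = bytes >> PAGE_SHIFT;           /* 32768 pages of 4 KiB       */
	u64 ptr_bytes = pages * sizeof(struct page *); /* 256 KiB of page pointers   */
#endif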
  70. /*
  71. * NOTE
  72. * Access to the *bm_pages is protected by bm_lock.
  73. * It is safe to read the other members within the lock.
  74. *
  75. * drbd_bm_set_bits is called from bio_endio callbacks;
  76. * we may be called with irqs already disabled,
  77. * so we need spin_lock_irqsave().
  78. * And we need the kmap_atomic.
  79. */
  80. struct drbd_bitmap {
  81. struct page **bm_pages;
  82. spinlock_t bm_lock;
  83. /* see LIMITATIONS: above */
  84. unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */
  85. unsigned long bm_bits;
  86. size_t bm_words;
  87. size_t bm_number_of_pages;
  88. sector_t bm_dev_capacity;
  89. struct mutex bm_change; /* serializes resize operations */
  90. wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */
  91. enum bm_flag bm_flags;
  92. /* debugging aid, in case we are still racy somewhere */
  93. char *bm_why;
  94. struct task_struct *bm_task;
  95. };
  96. static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
  97. unsigned long e, int val, const enum km_type km);
  98. #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
  99. static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
  100. {
  101. struct drbd_bitmap *b = mdev->bitmap;
  102. if (!__ratelimit(&drbd_ratelimit_state))
  103. return;
  104. dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
  105. current == mdev->receiver.task ? "receiver" :
  106. current == mdev->asender.task ? "asender" :
  107. current == mdev->worker.task ? "worker" : current->comm,
  108. func, b->bm_why ?: "?",
  109. b->bm_task == mdev->receiver.task ? "receiver" :
  110. b->bm_task == mdev->asender.task ? "asender" :
  111. b->bm_task == mdev->worker.task ? "worker" : "?");
  112. }
  113. void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
  114. {
  115. struct drbd_bitmap *b = mdev->bitmap;
  116. int trylock_failed;
  117. if (!b) {
  118. dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
  119. return;
  120. }
  121. trylock_failed = !mutex_trylock(&b->bm_change);
  122. if (trylock_failed) {
  123. dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
  124. current == mdev->receiver.task ? "receiver" :
  125. current == mdev->asender.task ? "asender" :
  126. current == mdev->worker.task ? "worker" : current->comm,
  127. why, b->bm_why ?: "?",
  128. b->bm_task == mdev->receiver.task ? "receiver" :
  129. b->bm_task == mdev->asender.task ? "asender" :
  130. b->bm_task == mdev->worker.task ? "worker" : "?");
  131. mutex_lock(&b->bm_change);
  132. }
  133. if (BM_LOCKED_MASK & b->bm_flags)
  134. dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
  135. b->bm_flags |= flags & BM_LOCKED_MASK;
  136. b->bm_why = why;
  137. b->bm_task = current;
  138. }
  139. void drbd_bm_unlock(struct drbd_conf *mdev)
  140. {
  141. struct drbd_bitmap *b = mdev->bitmap;
  142. if (!b) {
  143. dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
  144. return;
  145. }
  146. if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
  147. dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
  148. b->bm_flags &= ~BM_LOCKED_MASK;
  149. b->bm_why = NULL;
  150. b->bm_task = NULL;
  151. mutex_unlock(&b->bm_change);
  152. }
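/*
 * Editor's sketch of the intended calling pattern: bulk operations on the
 * bitmap bracket their work with drbd_bm_lock()/drbd_bm_unlock(), as
 * drbd_bm_resize() below does for "resize".
 */
#if 0	/* illustration only */
	drbd_bm_lock(mdev, "some bulk operation", BM_LOCKED_MASK);
	/* ... read or rewrite large parts of the bitmap ... */
	drbd_bm_unlock(mdev);
#endif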
  153. /* we store some "meta" info about our pages in page->private */
  154. /* at a granularity of 4k storage per bitmap bit:
  155. * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
  156. * 1<<38 bits,
  157. * 1<<23 4k bitmap pages.
  158. * Use 24 bits as page index, covers 2 peta byte storage
  159. * at a granularity of 4k per bit.
  160. * Used to report the failed page idx on io error from the endio handlers.
  161. */
  162. #define BM_PAGE_IDX_MASK ((1UL<<24)-1)
  163. /* this page is currently read in, or written back */
  164. #define BM_PAGE_IO_LOCK 31
  165. /* if there has been an IO error for this page */
  166. #define BM_PAGE_IO_ERROR 30
  167. /* this is to be able to intelligently skip disk IO,
  168. * set if bits have been set since last IO. */
  169. #define BM_PAGE_NEED_WRITEOUT 29
  170. /* to mark for lazy writeout once syncer cleared all clearable bits,
  171. * set if bits have been cleared since last IO. */
  172. #define BM_PAGE_LAZY_WRITEOUT 28
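/* Resulting page->private layout (editor's summary of the definitions above):
 *
 *   bit  31        BM_PAGE_IO_LOCK        page currently under IO
 *   bit  30        BM_PAGE_IO_ERROR       last IO on this page failed
 *   bit  29        BM_PAGE_NEED_WRITEOUT  bits set since last writeout
 *   bit  28        BM_PAGE_LAZY_WRITEOUT  bits cleared since last writeout
 *   bits 27..24    unused
 *   bits 23..0     page index (BM_PAGE_IDX_MASK)
 */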
  173. /* store_page_idx uses non-atomic assignment. It is only used directly after
  174. * allocating the page. All other bm_set_page_* and bm_clear_page_* need to
  175. * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
  176. * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
  177. * requires it all to be atomic as well. */
  178. static void bm_store_page_idx(struct page *page, unsigned long idx)
  179. {
  180. BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
  181. page_private(page) |= idx;
  182. }
  183. static unsigned long bm_page_to_idx(struct page *page)
  184. {
  185. return page_private(page) & BM_PAGE_IDX_MASK;
  186. }
  187. /* As it is very unlikely that the same page is under IO from more than one
  188. * context, we can get away with a bit per page and one wait queue per bitmap.
  189. */
  190. static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
  191. {
  192. struct drbd_bitmap *b = mdev->bitmap;
  193. void *addr = &page_private(b->bm_pages[page_nr]);
  194. wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
  195. }
  196. static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
  197. {
  198. struct drbd_bitmap *b = mdev->bitmap;
  199. void *addr = &page_private(b->bm_pages[page_nr]);
  200. clear_bit(BM_PAGE_IO_LOCK, addr);
  201. smp_mb__after_clear_bit();
  202. wake_up(&mdev->bitmap->bm_io_wait);
  203. }
  204. /* set _before_ submit_io, so it may be reset due to being changed
  205. * while this page is in flight... will get submitted later again */
  206. static void bm_set_page_unchanged(struct page *page)
  207. {
  208. /* use cmpxchg? */
  209. clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
  210. clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
  211. }
  212. static void bm_set_page_need_writeout(struct page *page)
  213. {
  214. set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
  215. }
  216. static int bm_test_page_unchanged(struct page *page)
  217. {
  218. volatile const unsigned long *addr = &page_private(page);
  219. return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
  220. }
  221. static void bm_set_page_io_err(struct page *page)
  222. {
  223. set_bit(BM_PAGE_IO_ERROR, &page_private(page));
  224. }
  225. static void bm_clear_page_io_err(struct page *page)
  226. {
  227. clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
  228. }
  229. static void bm_set_page_lazy_writeout(struct page *page)
  230. {
  231. set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
  232. }
  233. static int bm_test_page_lazy_writeout(struct page *page)
  234. {
  235. return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
  236. }
  237. /* on a 32bit box, this would allow for exactly (2<<38) bits. */
  238. static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
  239. {
  240. /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
  241. unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
  242. BUG_ON(page_nr >= b->bm_number_of_pages);
  243. return page_nr;
  244. }
  245. static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
  246. {
  247. /* page_nr = (bitnr/8) >> PAGE_SHIFT; */
  248. unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
  249. BUG_ON(page_nr >= b->bm_number_of_pages);
  250. return page_nr;
  251. }
  252. static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
  253. {
  254. struct page *page = b->bm_pages[idx];
  255. return (unsigned long *) kmap_atomic(page, km);
  256. }
  257. static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
  258. {
  259. return __bm_map_pidx(b, idx, KM_IRQ1);
  260. }
  261. static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
  262. {
  263. kunmap_atomic(p_addr, km);
  264. };
  265. static void bm_unmap(unsigned long *p_addr)
  266. {
  267. return __bm_unmap(p_addr, KM_IRQ1);
  268. }
  269. /* long word offset of _bitmap_ sector */
  270. #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
  271. /* word offset from start of bitmap to word number _in_page_
  272. * modulo longs per page
  273. #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)))
  274. hm, well, Philipp thinks gcc might not optimize the % into & (... - 1),
  275. so do it explicitly:
  276. */
  277. #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
  278. /* Long words per page */
  279. #define LWPP (PAGE_SIZE/sizeof(long))
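/* Example (editor's illustration): with 4 KiB pages and 64bit longs,
 * LWPP == 512; for a word offset of 1000 from the start of the bitmap:
 *   bm_word_to_page_idx():  1000 >> 9  == page 1
 *   MLPP(1000):             1000 & 511 == word 488 within that page
 */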
  280. /*
  281. * actually most functions herein should take a struct drbd_bitmap*, not a
  282. * struct drbd_conf*, but for the debug macros I like to have the mdev around
  283. * to be able to report device-specific messages.
  284. */
  285. static void bm_free_pages(struct page **pages, unsigned long number)
  286. {
  287. unsigned long i;
  288. if (!pages)
  289. return;
  290. for (i = 0; i < number; i++) {
  291. if (!pages[i]) {
  292. printk(KERN_ALERT "drbd: bm_free_pages tried to free "
  293. "a NULL pointer; i=%lu n=%lu\n",
  294. i, number);
  295. continue;
  296. }
  297. __free_page(pages[i]);
  298. pages[i] = NULL;
  299. }
  300. }
  301. static void bm_vk_free(void *ptr, int v)
  302. {
  303. if (v)
  304. vfree(ptr);
  305. else
  306. kfree(ptr);
  307. }
  308. /*
  309. * "have" and "want" are NUMBER OF PAGES.
  310. */
  311. static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
  312. {
  313. struct page **old_pages = b->bm_pages;
  314. struct page **new_pages, *page;
  315. unsigned int i, bytes, vmalloced = 0;
  316. unsigned long have = b->bm_number_of_pages;
  317. BUG_ON(have == 0 && old_pages != NULL);
  318. BUG_ON(have != 0 && old_pages == NULL);
  319. if (have == want)
  320. return old_pages;
  321. /* Trying kmalloc first, falling back to vmalloc.
  322. * GFP_KERNEL is ok, as this is done when a lower level disk is
  323. * "attached" to the drbd. Context is receiver thread or cqueue
  324. * thread. As we have no disk yet, we are not in the IO path,
  325. * not even the IO path of the peer. */
  326. bytes = sizeof(struct page *)*want;
  327. new_pages = kmalloc(bytes, GFP_KERNEL);
  328. if (!new_pages) {
  329. new_pages = vmalloc(bytes);
  330. if (!new_pages)
  331. return NULL;
  332. vmalloced = 1;
  333. }
  334. memset(new_pages, 0, bytes);
  335. if (want >= have) {
  336. for (i = 0; i < have; i++)
  337. new_pages[i] = old_pages[i];
  338. for (; i < want; i++) {
  339. page = alloc_page(GFP_HIGHUSER);
  340. if (!page) {
  341. bm_free_pages(new_pages + have, i - have);
  342. bm_vk_free(new_pages, vmalloced);
  343. return NULL;
  344. }
  345. /* we want to know which page it is
  346. * from the endio handlers */
  347. bm_store_page_idx(page, i);
  348. new_pages[i] = page;
  349. }
  350. } else {
  351. for (i = 0; i < want; i++)
  352. new_pages[i] = old_pages[i];
  353. /* NOT HERE, we are outside the spinlock!
  354. bm_free_pages(old_pages + want, have - want);
  355. */
  356. }
  357. if (vmalloced)
  358. b->bm_flags |= BM_P_VMALLOCED;
  359. else
  360. b->bm_flags &= ~BM_P_VMALLOCED;
  361. return new_pages;
  362. }
  363. /*
  364. * called on driver init only. TODO call when a device is created.
  365. * allocates the drbd_bitmap, and stores it in mdev->bitmap.
  366. */
  367. int drbd_bm_init(struct drbd_conf *mdev)
  368. {
  369. struct drbd_bitmap *b = mdev->bitmap;
  370. WARN_ON(b != NULL);
  371. b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
  372. if (!b)
  373. return -ENOMEM;
  374. spin_lock_init(&b->bm_lock);
  375. mutex_init(&b->bm_change);
  376. init_waitqueue_head(&b->bm_io_wait);
  377. mdev->bitmap = b;
  378. return 0;
  379. }
  380. sector_t drbd_bm_capacity(struct drbd_conf *mdev)
  381. {
  382. ERR_IF(!mdev->bitmap) return 0;
  383. return mdev->bitmap->bm_dev_capacity;
  384. }
  385. /* called on driver unload. TODO: call when a device is destroyed.
  386. */
  387. void drbd_bm_cleanup(struct drbd_conf *mdev)
  388. {
  389. ERR_IF (!mdev->bitmap) return;
  390. bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
  391. bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
  392. kfree(mdev->bitmap);
  393. mdev->bitmap = NULL;
  394. }
  395. /*
  396. * since (b->bm_bits % BITS_PER_LONG) != 0,
  397. * this masks out the remaining bits.
  398. * Returns the number of bits cleared.
  399. */
  400. #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3))
  401. #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1)
  402. #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1)
  403. static int bm_clear_surplus(struct drbd_bitmap *b)
  404. {
  405. unsigned long mask;
  406. unsigned long *p_addr, *bm;
  407. int tmp;
  408. int cleared = 0;
  409. /* number of bits modulo bits per page */
  410. tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
  411. /* mask the used bits of the word containing the last bit */
  412. mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
  413. /* bitmap is always stored little endian,
  414. * on disk and in core memory alike */
  415. mask = cpu_to_lel(mask);
  416. p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
  417. bm = p_addr + (tmp/BITS_PER_LONG);
  418. if (mask) {
  419. /* If mask != 0, we are not exactly aligned, so bm now points
  420. * to the long containing the last bit.
  421. * If mask == 0, bm already points to the word immediately
  422. * after the last (long word aligned) bit. */
  423. cleared = hweight_long(*bm & ~mask);
  424. *bm &= mask;
  425. bm++;
  426. }
  427. if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
  428. /* on a 32bit arch, we may need to zero out
  429. * a padding long to align with a 64bit remote */
  430. cleared += hweight_long(*bm);
  431. *bm = 0;
  432. }
  433. bm_unmap(p_addr);
  434. return cleared;
  435. }
  436. static void bm_set_surplus(struct drbd_bitmap *b)
  437. {
  438. unsigned long mask;
  439. unsigned long *p_addr, *bm;
  440. int tmp;
  441. /* number of bits modulo bits per page */
  442. tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
  443. /* mask the used bits of the word containing the last bit */
  444. mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
  445. /* bitmap is always stored little endian,
  446. * on disk and in core memory alike */
  447. mask = cpu_to_lel(mask);
  448. p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
  449. bm = p_addr + (tmp/BITS_PER_LONG);
  450. if (mask) {
  451. /* If mask != 0, we are not exactly aligned, so bm now points
  452. * to the long containing the last bit.
  453. * If mask == 0, bm already points to the word immediately
  454. * after the last (long word aligned) bit. */
  455. *bm |= ~mask;
  456. bm++;
  457. }
  458. if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
  459. /* on a 32bit arch, we may need to fill in
  460. * a padding long to align with a 64bit remote */
  461. *bm = ~0UL;
  462. }
  463. bm_unmap(p_addr);
  464. }
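/* Worked example for the two functions above (editor's illustration):
 * with bm_bits == 100 on a 64bit arch,
 *   tmp  == 100 (bits used in the last page),
 *   mask == (1UL << (100 & 63)) - 1 == (1UL << 36) - 1,
 * so bm points at the second long word of the last page; bits 36..63 of
 * that word are the "surplus" that bm_clear_surplus() clears and
 * bm_set_surplus() sets.
 */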
  465. /* you better not modify the bitmap while this is running,
  466. * or its results will be stale */
  467. static unsigned long bm_count_bits(struct drbd_bitmap *b)
  468. {
  469. unsigned long *p_addr;
  470. unsigned long bits = 0;
  471. unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
  472. int idx, i, last_word;
  473. /* all but last page */
  474. for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
  475. p_addr = __bm_map_pidx(b, idx, KM_USER0);
  476. for (i = 0; i < LWPP; i++)
  477. bits += hweight_long(p_addr[i]);
  478. __bm_unmap(p_addr, KM_USER0);
  479. cond_resched();
  480. }
  481. /* last (or only) page */
  482. last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
  483. p_addr = __bm_map_pidx(b, idx, KM_USER0);
  484. for (i = 0; i < last_word; i++)
  485. bits += hweight_long(p_addr[i]);
  486. p_addr[last_word] &= cpu_to_lel(mask);
  487. bits += hweight_long(p_addr[last_word]);
  488. /* 32bit arch, may have an unused padding long */
  489. if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
  490. p_addr[last_word+1] = 0;
  491. __bm_unmap(p_addr, KM_USER0);
  492. return bits;
  493. }
  494. /* offset and len in long words.*/
  495. static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
  496. {
  497. unsigned long *p_addr, *bm;
  498. unsigned int idx;
  499. size_t do_now, end;
  500. end = offset + len;
  501. if (end > b->bm_words) {
  502. printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
  503. return;
  504. }
  505. while (offset < end) {
  506. do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
  507. idx = bm_word_to_page_idx(b, offset);
  508. p_addr = bm_map_pidx(b, idx);
  509. bm = p_addr + MLPP(offset);
  510. if (bm+do_now > p_addr + LWPP) {
  511. printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
  512. p_addr, bm, (int)do_now);
  513. } else
  514. memset(bm, c, do_now * sizeof(long));
  515. bm_unmap(p_addr);
  516. bm_set_page_need_writeout(b->bm_pages[idx]);
  517. offset += do_now;
  518. }
  519. }
  520. /*
  521. * make sure the bitmap has enough room for the attached storage,
  522. * if necessary, resize.
  523. * called whenever we may have changed the device size.
  524. * returns -ENOMEM if we could not allocate enough memory, 0 on success.
  525. * In case this is actually a resize, we copy the old bitmap into the new one.
  526. * Otherwise, the bitmap is initialized to all bits set.
  527. */
  528. int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
  529. {
  530. struct drbd_bitmap *b = mdev->bitmap;
  531. unsigned long bits, words, owords, obits;
  532. unsigned long want, have, onpages; /* number of pages */
  533. struct page **npages, **opages = NULL;
  534. int err = 0, growing;
  535. int opages_vmalloced;
  536. ERR_IF(!b) return -ENOMEM;
  537. drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
  538. dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
  539. (unsigned long long)capacity);
  540. if (capacity == b->bm_dev_capacity)
  541. goto out;
  542. opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
  543. if (capacity == 0) {
  544. spin_lock_irq(&b->bm_lock);
  545. opages = b->bm_pages;
  546. onpages = b->bm_number_of_pages;
  547. owords = b->bm_words;
  548. b->bm_pages = NULL;
  549. b->bm_number_of_pages =
  550. b->bm_set =
  551. b->bm_bits =
  552. b->bm_words =
  553. b->bm_dev_capacity = 0;
  554. spin_unlock_irq(&b->bm_lock);
  555. bm_free_pages(opages, onpages);
  556. bm_vk_free(opages, opages_vmalloced);
  557. goto out;
  558. }
  559. bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
  560. /* if we would use
  561. words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
  562. a 32bit host could present the wrong number of words
  563. to a 64bit host.
  564. */
  565. words = ALIGN(bits, 64) >> LN2_BPL;
  566. if (get_ldev(mdev)) {
  567. u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
  568. put_ldev(mdev);
  569. if (bits > bits_on_disk) {
  570. dev_info(DEV, "bits = %lu\n", bits);
  571. dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
  572. err = -ENOSPC;
  573. goto out;
  574. }
  575. }
  576. want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
  577. have = b->bm_number_of_pages;
  578. if (want == have) {
  579. D_ASSERT(b->bm_pages != NULL);
  580. npages = b->bm_pages;
  581. } else {
  582. if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
  583. npages = NULL;
  584. else
  585. npages = bm_realloc_pages(b, want);
  586. }
  587. if (!npages) {
  588. err = -ENOMEM;
  589. goto out;
  590. }
  591. spin_lock_irq(&b->bm_lock);
  592. opages = b->bm_pages;
  593. owords = b->bm_words;
  594. obits = b->bm_bits;
  595. growing = bits > obits;
  596. if (opages && growing && set_new_bits)
  597. bm_set_surplus(b);
  598. b->bm_pages = npages;
  599. b->bm_number_of_pages = want;
  600. b->bm_bits = bits;
  601. b->bm_words = words;
  602. b->bm_dev_capacity = capacity;
  603. if (growing) {
  604. if (set_new_bits) {
  605. bm_memset(b, owords, 0xff, words-owords);
  606. b->bm_set += bits - obits;
  607. } else
  608. bm_memset(b, owords, 0x00, words-owords);
  609. }
  610. if (want < have) {
  611. /* implicit: (opages != NULL) && (opages != npages) */
  612. bm_free_pages(opages + want, have - want);
  613. }
  614. (void)bm_clear_surplus(b);
  615. spin_unlock_irq(&b->bm_lock);
  616. if (opages != npages)
  617. bm_vk_free(opages, opages_vmalloced);
  618. if (!growing)
  619. b->bm_set = bm_count_bits(b);
  620. dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
  621. out:
  622. drbd_bm_unlock(mdev);
  623. return err;
  624. }
  625. /* inherently racy:
  626. * if not protected by other means, return value may be out of date when
  627. * leaving this function...
  628. * we still need to lock it, since it is important that this returns
  629. * bm_set == 0 precisely.
  630. *
  631. * maybe bm_set should be atomic_t ?
  632. */
  633. unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
  634. {
  635. struct drbd_bitmap *b = mdev->bitmap;
  636. unsigned long s;
  637. unsigned long flags;
  638. ERR_IF(!b) return 0;
  639. ERR_IF(!b->bm_pages) return 0;
  640. spin_lock_irqsave(&b->bm_lock, flags);
  641. s = b->bm_set;
  642. spin_unlock_irqrestore(&b->bm_lock, flags);
  643. return s;
  644. }
  645. unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
  646. {
  647. unsigned long s;
  648. /* if I don't have a disk, I don't know about out-of-sync status */
  649. if (!get_ldev_if_state(mdev, D_NEGOTIATING))
  650. return 0;
  651. s = _drbd_bm_total_weight(mdev);
  652. put_ldev(mdev);
  653. return s;
  654. }
  655. size_t drbd_bm_words(struct drbd_conf *mdev)
  656. {
  657. struct drbd_bitmap *b = mdev->bitmap;
  658. ERR_IF(!b) return 0;
  659. ERR_IF(!b->bm_pages) return 0;
  660. return b->bm_words;
  661. }
  662. unsigned long drbd_bm_bits(struct drbd_conf *mdev)
  663. {
  664. struct drbd_bitmap *b = mdev->bitmap;
  665. ERR_IF(!b) return 0;
  666. return b->bm_bits;
  667. }
  668. /* merge number words from buffer into the bitmap starting at offset.
  669. * buffer[i] is expected to be little endian unsigned long.
  670. * bitmap must be locked by drbd_bm_lock.
  671. * currently only used from receive_bitmap.
  672. */
  673. void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
  674. unsigned long *buffer)
  675. {
  676. struct drbd_bitmap *b = mdev->bitmap;
  677. unsigned long *p_addr, *bm;
  678. unsigned long word, bits;
  679. unsigned int idx;
  680. size_t end, do_now;
  681. end = offset + number;
  682. ERR_IF(!b) return;
  683. ERR_IF(!b->bm_pages) return;
  684. if (number == 0)
  685. return;
  686. WARN_ON(offset >= b->bm_words);
  687. WARN_ON(end > b->bm_words);
  688. spin_lock_irq(&b->bm_lock);
  689. while (offset < end) {
  690. do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
  691. idx = bm_word_to_page_idx(b, offset);
  692. p_addr = bm_map_pidx(b, idx);
  693. bm = p_addr + MLPP(offset);
  694. offset += do_now;
  695. while (do_now--) {
  696. bits = hweight_long(*bm);
  697. word = *bm | *buffer++;
  698. *bm++ = word;
  699. b->bm_set += hweight_long(word) - bits;
  700. }
  701. bm_unmap(p_addr);
  702. bm_set_page_need_writeout(b->bm_pages[idx]);
  703. }
  704. /* with 32bit <-> 64bit cross-platform connect
  705. * this is only correct for current usage,
  706. * where we _know_ that we are 64 bit aligned,
  707. * and know that this function is used in this way, too...
  708. */
  709. if (end == b->bm_words)
  710. b->bm_set -= bm_clear_surplus(b);
  711. spin_unlock_irq(&b->bm_lock);
  712. }
  713. /* copy number words from the bitmap starting at offset into the buffer.
  714. * buffer[i] will be little endian unsigned long.
  715. */
  716. void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
  717. unsigned long *buffer)
  718. {
  719. struct drbd_bitmap *b = mdev->bitmap;
  720. unsigned long *p_addr, *bm;
  721. size_t end, do_now;
  722. end = offset + number;
  723. ERR_IF(!b) return;
  724. ERR_IF(!b->bm_pages) return;
  725. spin_lock_irq(&b->bm_lock);
  726. if ((offset >= b->bm_words) ||
  727. (end > b->bm_words) ||
  728. (number <= 0))
  729. dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
  730. (unsigned long) offset,
  731. (unsigned long) number,
  732. (unsigned long) b->bm_words);
  733. else {
  734. while (offset < end) {
  735. do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
  736. p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
  737. bm = p_addr + MLPP(offset);
  738. offset += do_now;
  739. while (do_now--)
  740. *buffer++ = *bm++;
  741. bm_unmap(p_addr);
  742. }
  743. }
  744. spin_unlock_irq(&b->bm_lock);
  745. }
  746. /* set all bits in the bitmap */
  747. void drbd_bm_set_all(struct drbd_conf *mdev)
  748. {
  749. struct drbd_bitmap *b = mdev->bitmap;
  750. ERR_IF(!b) return;
  751. ERR_IF(!b->bm_pages) return;
  752. spin_lock_irq(&b->bm_lock);
  753. bm_memset(b, 0, 0xff, b->bm_words);
  754. (void)bm_clear_surplus(b);
  755. b->bm_set = b->bm_bits;
  756. spin_unlock_irq(&b->bm_lock);
  757. }
  758. /* clear all bits in the bitmap */
  759. void drbd_bm_clear_all(struct drbd_conf *mdev)
  760. {
  761. struct drbd_bitmap *b = mdev->bitmap;
  762. ERR_IF(!b) return;
  763. ERR_IF(!b->bm_pages) return;
  764. spin_lock_irq(&b->bm_lock);
  765. bm_memset(b, 0, 0, b->bm_words);
  766. b->bm_set = 0;
  767. spin_unlock_irq(&b->bm_lock);
  768. }
  769. struct bm_aio_ctx {
  770. struct drbd_conf *mdev;
  771. atomic_t in_flight;
  772. struct completion done;
  773. unsigned flags;
  774. #define BM_AIO_COPY_PAGES 1
  775. int error;
  776. };
  777. /* bv_page may be a copy, or may be the original */
  778. static void bm_async_io_complete(struct bio *bio, int error)
  779. {
  780. struct bm_aio_ctx *ctx = bio->bi_private;
  781. struct drbd_conf *mdev = ctx->mdev;
  782. struct drbd_bitmap *b = mdev->bitmap;
  783. unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
  784. int uptodate = bio_flagged(bio, BIO_UPTODATE);
  785. /* strange behavior of some lower level drivers...
  786. * fail the request by clearing the uptodate flag,
  787. * but do not return any error?!
  788. * do we want to WARN() on this? */
  789. if (!error && !uptodate)
  790. error = -EIO;
  791. if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
  792. !bm_test_page_unchanged(b->bm_pages[idx]))
  793. dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);
  794. if (error) {
  795. /* ctx error will hold the completed-last non-zero error code,
  796. * in case error codes differ. */
  797. ctx->error = error;
  798. bm_set_page_io_err(b->bm_pages[idx]);
  799. /* Not identical to on disk version of it.
  800. * Is BM_PAGE_IO_ERROR enough? */
  801. if (__ratelimit(&drbd_ratelimit_state))
  802. dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
  803. error, idx);
  804. } else {
  805. bm_clear_page_io_err(b->bm_pages[idx]);
  806. dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
  807. }
  808. bm_page_unlock_io(mdev, idx);
  809. /* FIXME give back to page pool */
  810. if (ctx->flags & BM_AIO_COPY_PAGES)
  811. put_page(bio->bi_io_vec[0].bv_page);
  812. bio_put(bio);
  813. if (atomic_dec_and_test(&ctx->in_flight))
  814. complete(&ctx->done);
  815. }
  816. static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
  817. {
  818. /* we are process context. we always get a bio */
  819. struct bio *bio = bio_alloc(GFP_KERNEL, 1);
  820. struct drbd_conf *mdev = ctx->mdev;
  821. struct drbd_bitmap *b = mdev->bitmap;
  822. struct page *page;
  823. unsigned int len;
  824. sector_t on_disk_sector =
  825. mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
  826. on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
  827. /* this might happen with very small
  828. * flexible external meta data device,
  829. * or with PAGE_SIZE > 4k */
  830. len = min_t(unsigned int, PAGE_SIZE,
  831. (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
  832. /* serialize IO on this page */
  833. bm_page_lock_io(mdev, page_nr);
  834. /* before memcpy and submit,
  835. * so it can be redirtied any time */
  836. bm_set_page_unchanged(b->bm_pages[page_nr]);
  837. if (ctx->flags & BM_AIO_COPY_PAGES) {
  838. /* FIXME alloc_page is good enough for now, but actually needs
  839. * to use pre-allocated page pool */
  840. void *src, *dest;
  841. page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
  842. dest = kmap_atomic(page, KM_USER0);
  843. src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
  844. memcpy(dest, src, PAGE_SIZE);
  845. kunmap_atomic(src, KM_USER1);
  846. kunmap_atomic(dest, KM_USER0);
  847. bm_store_page_idx(page, page_nr);
  848. } else
  849. page = b->bm_pages[page_nr];
  850. bio->bi_bdev = mdev->ldev->md_bdev;
  851. bio->bi_sector = on_disk_sector;
  852. bio_add_page(bio, page, len, 0);
  853. bio->bi_private = ctx;
  854. bio->bi_end_io = bm_async_io_complete;
  855. if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
  856. bio->bi_rw |= rw;
  857. bio_endio(bio, -EIO);
  858. } else {
  859. submit_bio(rw, bio);
  860. }
  861. }
  862. /*
  863. * bm_rw: read/write the whole bitmap from/to its on disk location.
  864. */
  865. static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
  866. {
  867. struct bm_aio_ctx ctx = {
  868. .mdev = mdev,
  869. .in_flight = ATOMIC_INIT(1),
  870. .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
  871. .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
  872. };
  873. struct drbd_bitmap *b = mdev->bitmap;
  874. int num_pages, i, count = 0;
  875. unsigned long now;
  876. char ppb[10];
  877. int err = 0;
  878. /*
  879. * We are protected against bitmap disappearing/resizing by holding an
  880. * ldev reference (caller must have called get_ldev()).
  881. * For read/write, we are protected against changes to the bitmap by
  882. * the bitmap lock (see drbd_bitmap_io).
  883. * For lazy writeout, we don't care for ongoing changes to the bitmap,
  884. * as we submit copies of pages anyways.
  885. */
  886. if (!ctx.flags)
  887. WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
  888. num_pages = b->bm_number_of_pages;
  889. now = jiffies;
  890. /* let the layers below us try to merge these bios... */
  891. for (i = 0; i < num_pages; i++) {
  892. /* ignore completely unchanged pages */
  893. if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
  894. break;
  895. if (rw & WRITE) {
  896. if (bm_test_page_unchanged(b->bm_pages[i])) {
  897. dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
  898. continue;
  899. }
  900. /* during lazy writeout,
  901. * ignore those pages not marked for lazy writeout. */
  902. if (lazy_writeout_upper_idx &&
  903. !bm_test_page_lazy_writeout(b->bm_pages[i])) {
  904. dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
  905. continue;
  906. }
  907. }
  908. atomic_inc(&ctx.in_flight);
  909. bm_page_io_async(&ctx, i, rw);
  910. ++count;
  911. cond_resched();
  912. }
  913. /*
  914. * We initialize ctx.in_flight to one to make sure bm_async_io_complete
  915. * will not complete() early, and decrement / test it here. If there
  916. * are still some bios in flight, we need to wait for them here.
  917. */
  918. if (!atomic_dec_and_test(&ctx.in_flight))
  919. wait_for_completion(&ctx.done);
  920. dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
  921. rw == WRITE ? "WRITE" : "READ",
  922. count, jiffies - now);
  923. if (ctx.error) {
  924. dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
  925. drbd_chk_io_error(mdev, 1, true);
  926. err = -EIO; /* ctx.error ? */
  927. }
  928. now = jiffies;
  929. if (rw == WRITE) {
  930. drbd_md_flush(mdev);
  931. } else /* rw == READ */ {
  932. b->bm_set = bm_count_bits(b);
  933. dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
  934. jiffies - now);
  935. }
  936. now = b->bm_set;
  937. dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
  938. ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
  939. return err;
  940. }
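/* The in_flight / completion handshake used above, in isolation (editor's
 * sketch; for_each_page_to_submit() and submit_one() are hypothetical
 * stand-ins for the submission loop of bm_rw()): starting the count at 1
 * guarantees the last completing bio cannot complete() before submission
 * has finished.
 */
#if 0	/* illustration only */
	atomic_set(&ctx.in_flight, 1);
	for_each_page_to_submit(i) {
		atomic_inc(&ctx.in_flight);
		submit_one(i);	/* endio does atomic_dec_and_test() + complete() */
	}
	if (!atomic_dec_and_test(&ctx.in_flight))
		wait_for_completion(&ctx.done);
#endif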
  941. /**
  942. * drbd_bm_read() - Read the whole bitmap from its on disk location.
  943. * @mdev: DRBD device.
  944. */
  945. int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
  946. {
  947. return bm_rw(mdev, READ, 0);
  948. }
  949. /**
  950. * drbd_bm_write() - Write the whole bitmap to its on disk location.
  951. * @mdev: DRBD device.
  952. *
  953. * Will only write pages that have changed since last IO.
  954. */
  955. int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
  956. {
  957. return bm_rw(mdev, WRITE, 0);
  958. }
  959. /**
  960. * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
  961. * @mdev: DRBD device.
  962. * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
  963. */
  964. int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
  965. {
  966. return bm_rw(mdev, WRITE, upper_idx);
  967. }
  968. /**
  969. * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
  970. * @mdev: DRBD device.
  971. * @idx: bitmap page index
  972. *
  973. * We don't want to special case on logical_block_size of the backend device,
  974. * so we submit PAGE_SIZE aligned pieces.
  975. * Note that on "most" systems, PAGE_SIZE is 4k.
  976. *
  977. * In case this becomes an issue on systems with larger PAGE_SIZE,
  978. * we may want to change this again to write 4k aligned 4k pieces.
  979. */
  980. int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
  981. {
  982. struct bm_aio_ctx ctx = {
  983. .mdev = mdev,
  984. .in_flight = ATOMIC_INIT(1),
  985. .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
  986. .flags = BM_AIO_COPY_PAGES,
  987. };
  988. if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
  989. dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
  990. return 0;
  991. }
  992. bm_page_io_async(&ctx, idx, WRITE_SYNC);
  993. wait_for_completion(&ctx.done);
  994. if (ctx.error)
  995. drbd_chk_io_error(mdev, 1, true);
  996. /* that should force detach, so the in memory bitmap will be
  997. * gone in a moment as well. */
  998. mdev->bm_writ_cnt++;
  999. return ctx.error;
  1000. }
  1001. /* NOTE
  1002. * find_first_bit returns int, we return unsigned long.
  1003. * For this to work on 32bit arch with bitnumbers > (1<<32),
  1004. * we'd need to return u64, and get a whole lot of other places
  1005. * fixed where we still use unsigned long.
  1006. *
  1007. * this returns a bit number, NOT a sector!
  1008. */
  1009. static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
  1010. const int find_zero_bit, const enum km_type km)
  1011. {
  1012. struct drbd_bitmap *b = mdev->bitmap;
  1013. unsigned long *p_addr;
  1014. unsigned long bit_offset;
  1015. unsigned i;
  1016. if (bm_fo > b->bm_bits) {
  1017. dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
  1018. bm_fo = DRBD_END_OF_BITMAP;
  1019. } else {
  1020. while (bm_fo < b->bm_bits) {
  1021. /* bit offset of the first bit in the page */
  1022. bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
  1023. p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
  1024. if (find_zero_bit)
  1025. i = generic_find_next_zero_le_bit(p_addr,
  1026. PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
  1027. else
  1028. i = generic_find_next_le_bit(p_addr,
  1029. PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
  1030. __bm_unmap(p_addr, km);
  1031. if (i < PAGE_SIZE*8) {
  1032. bm_fo = bit_offset + i;
  1033. if (bm_fo >= b->bm_bits)
  1034. break;
  1035. goto found;
  1036. }
  1037. bm_fo = bit_offset + PAGE_SIZE*8;
  1038. }
  1039. bm_fo = DRBD_END_OF_BITMAP;
  1040. }
  1041. found:
  1042. return bm_fo;
  1043. }
  1044. static unsigned long bm_find_next(struct drbd_conf *mdev,
  1045. unsigned long bm_fo, const int find_zero_bit)
  1046. {
  1047. struct drbd_bitmap *b = mdev->bitmap;
  1048. unsigned long i = DRBD_END_OF_BITMAP;
  1049. ERR_IF(!b) return i;
  1050. ERR_IF(!b->bm_pages) return i;
  1051. spin_lock_irq(&b->bm_lock);
  1052. if (BM_DONT_TEST & b->bm_flags)
  1053. bm_print_lock_info(mdev);
  1054. i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
  1055. spin_unlock_irq(&b->bm_lock);
  1056. return i;
  1057. }
  1058. unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
  1059. {
  1060. return bm_find_next(mdev, bm_fo, 0);
  1061. }
  1062. #if 0
  1063. /* not yet needed for anything. */
  1064. unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
  1065. {
  1066. return bm_find_next(mdev, bm_fo, 1);
  1067. }
  1068. #endif
  1069. /* does not spin_lock_irqsave.
  1070. * you must take drbd_bm_lock() first */
  1071. unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
  1072. {
  1073. /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
  1074. return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
  1075. }
  1076. unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
  1077. {
  1078. /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
  1079. return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
  1080. }
  1081. /* returns number of bits actually changed.
  1082. * for val != 0, we change 0 -> 1, return code positive
  1083. * for val == 0, we change 1 -> 0, return code negative
  1084. * wants bitnr, not sector.
  1085. * expected to be called for only a few bits (e - s about BITS_PER_LONG).
  1086. * Must hold bitmap lock already. */
  1087. static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
  1088. unsigned long e, int val, const enum km_type km)
  1089. {
  1090. struct drbd_bitmap *b = mdev->bitmap;
  1091. unsigned long *p_addr = NULL;
  1092. unsigned long bitnr;
  1093. unsigned int last_page_nr = -1U;
  1094. int c = 0;
  1095. int changed_total = 0;
  1096. if (e >= b->bm_bits) {
  1097. dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
  1098. s, e, b->bm_bits);
  1099. e = b->bm_bits ? b->bm_bits -1 : 0;
  1100. }
  1101. for (bitnr = s; bitnr <= e; bitnr++) {
  1102. unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
  1103. if (page_nr != last_page_nr) {
  1104. if (p_addr)
  1105. __bm_unmap(p_addr, km);
  1106. if (c < 0)
  1107. bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
  1108. else if (c > 0)
  1109. bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
  1110. changed_total += c;
  1111. c = 0;
  1112. p_addr = __bm_map_pidx(b, page_nr, km);
  1113. last_page_nr = page_nr;
  1114. }
  1115. if (val)
  1116. c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
  1117. else
  1118. c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
  1119. }
  1120. if (p_addr)
  1121. __bm_unmap(p_addr, km);
  1122. if (c < 0)
  1123. bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
  1124. else if (c > 0)
  1125. bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
  1126. changed_total += c;
  1127. b->bm_set += changed_total;
  1128. return changed_total;
  1129. }
  1130. /* returns number of bits actually changed.
  1131. * for val != 0, we change 0 -> 1, return code positive
  1132. * for val == 0, we change 1 -> 0, return code negative
  1133. * wants bitnr, not sector */
  1134. static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
  1135. const unsigned long e, int val)
  1136. {
  1137. unsigned long flags;
  1138. struct drbd_bitmap *b = mdev->bitmap;
  1139. int c = 0;
  1140. ERR_IF(!b) return 1;
  1141. ERR_IF(!b->bm_pages) return 0;
  1142. spin_lock_irqsave(&b->bm_lock, flags);
  1143. if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
  1144. bm_print_lock_info(mdev);
  1145. c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
  1146. spin_unlock_irqrestore(&b->bm_lock, flags);
  1147. return c;
  1148. }
  1149. /* returns number of bits changed 0 -> 1 */
  1150. int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
  1151. {
  1152. return bm_change_bits_to(mdev, s, e, 1);
  1153. }
  1154. /* returns number of bits changed 1 -> 0 */
  1155. int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
  1156. {
  1157. return -bm_change_bits_to(mdev, s, e, 0);
  1158. }
  1159. /* sets all bits in full words,
  1160. * from first_word up to, but not including, last_word */
  1161. static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
  1162. int page_nr, int first_word, int last_word)
  1163. {
  1164. int i;
  1165. int bits;
  1166. unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
  1167. for (i = first_word; i < last_word; i++) {
  1168. bits = hweight_long(paddr[i]);
  1169. paddr[i] = ~0UL;
  1170. b->bm_set += BITS_PER_LONG - bits;
  1171. }
  1172. kunmap_atomic(paddr, KM_USER0);
  1173. }
  1174. /* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
  1175. * You must first drbd_bm_lock().
  1176. * Can be called to set the whole bitmap in one go.
  1177. * Sets bits from s to e _inclusive_. */
  1178. void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
  1179. {
  1180. /* First set_bit from the first bit (s)
  1181. * up to the next long boundary (sl),
  1182. * then assign full words up to the last long boundary (el),
  1183. * then set_bit up to and including the last bit (e).
  1184. *
  1185. * Do not use memset, because we must account for changes,
  1186. * so we need to loop over the words with hweight() anyways.
  1187. */
  1188. unsigned long sl = ALIGN(s,BITS_PER_LONG);
  1189. unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
  1190. int first_page;
  1191. int last_page;
  1192. int page_nr;
  1193. int first_word;
  1194. int last_word;
  1195. if (e - s <= 3*BITS_PER_LONG) {
  1196. /* don't bother; el and sl may even be wrong. */
  1197. __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
  1198. return;
  1199. }
  1200. /* difference is large enough that we can trust sl and el */
  1201. /* bits filling the current long */
  1202. if (sl)
  1203. __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
  1204. first_page = sl >> (3 + PAGE_SHIFT);
  1205. last_page = el >> (3 + PAGE_SHIFT);
  1206. /* MLPP: modulo longs per page */
  1207. /* LWPP: long words per page */
  1208. first_word = MLPP(sl >> LN2_BPL);
  1209. last_word = LWPP;
  1210. /* first and full pages, unless first page == last page */
  1211. for (page_nr = first_page; page_nr < last_page; page_nr++) {
  1212. bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
  1213. cond_resched();
  1214. first_word = 0;
  1215. }
  1216. /* last page (respectively only page, for first page == last page) */
  1217. last_word = MLPP(el >> LN2_BPL);
  1218. bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
  1219. /* possibly trailing bits.
  1220. * example: (e & 63) == 63, el will be e+1.
  1221. * if that even was the very last bit,
  1222. * it would trigger an assert in __bm_change_bits_to()
  1223. */
  1224. if (el <= e)
  1225. __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
  1226. }
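/* Worked example (editor's illustration, 64bit arch): s == 5, e == 200.
 *   sl == 64, el == 192,
 *   bits   5..63  are set bit by bit via __bm_change_bits_to(),
 *   bits  64..191 are set one full long word at a time,
 *   bits 192..200 are again set bit by bit.
 */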
  1227. /* returns bit state
  1228. * wants bitnr, NOT sector.
  1229. * inherently racy... area needs to be locked by means of {al,rs}_lru
  1230. * 1 ... bit set
  1231. * 0 ... bit not set
  1232. * -1 ... first out of bounds access, stop testing for bits!
  1233. */
  1234. int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
  1235. {
  1236. unsigned long flags;
  1237. struct drbd_bitmap *b = mdev->bitmap;
  1238. unsigned long *p_addr;
  1239. int i;
  1240. ERR_IF(!b) return 0;
  1241. ERR_IF(!b->bm_pages) return 0;
  1242. spin_lock_irqsave(&b->bm_lock, flags);
  1243. if (BM_DONT_TEST & b->bm_flags)
  1244. bm_print_lock_info(mdev);
  1245. if (bitnr < b->bm_bits) {
  1246. p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
  1247. i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
  1248. bm_unmap(p_addr);
  1249. } else if (bitnr == b->bm_bits) {
  1250. i = -1;
  1251. } else { /* (bitnr > b->bm_bits) */
  1252. dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
  1253. i = 0;
  1254. }
  1255. spin_unlock_irqrestore(&b->bm_lock, flags);
  1256. return i;
  1257. }
  1258. /* returns number of bits set in the range [s, e] */
  1259. int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
  1260. {
  1261. unsigned long flags;
  1262. struct drbd_bitmap *b = mdev->bitmap;
  1263. unsigned long *p_addr = NULL;
  1264. unsigned long bitnr;
  1265. unsigned int page_nr = -1U;
  1266. int c = 0;
  1267. /* If this is called without a bitmap, that is a bug. But just to be
  1268. * robust in case we screwed up elsewhere, in that case pretend there
  1269. * was one dirty bit in the requested area, so we won't try to do a
  1270. * local read there (no bitmap probably implies no disk) */
  1271. ERR_IF(!b) return 1;
  1272. ERR_IF(!b->bm_pages) return 1;
  1273. spin_lock_irqsave(&b->bm_lock, flags);
  1274. if (BM_DONT_TEST & b->bm_flags)
  1275. bm_print_lock_info(mdev);
  1276. for (bitnr = s; bitnr <= e; bitnr++) {
  1277. unsigned int idx = bm_bit_to_page_idx(b, bitnr);
  1278. if (page_nr != idx) {
  1279. page_nr = idx;
  1280. if (p_addr)
  1281. bm_unmap(p_addr);
  1282. p_addr = bm_map_pidx(b, idx);
  1283. }
  1284. ERR_IF (bitnr >= b->bm_bits) {
  1285. dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
  1286. } else {
  1287. c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
  1288. }
  1289. }
  1290. if (p_addr)
  1291. bm_unmap(p_addr);
  1292. spin_unlock_irqrestore(&b->bm_lock, flags);
  1293. return c;
  1294. }
  1295. /* inherently racy...
  1296. * return value may be already out-of-date when this function returns.
  1297. * but the general usage is that this is only used during a cstate when bits are
  1298. * only cleared, not set, and we typically only care for the case when the return
  1299. * value is zero, or we already "locked" this "bitmap extent" by other means.
  1300. *
  1301. * enr is bm-extent number, since we chose to name one sector (512 bytes)
  1302. * worth of the bitmap a "bitmap extent".
  1303. *
  1304. * TODO
  1305. * I think since we use it like a reference count, we should use the real
  1306. * reference count of some bitmap extent element from some lru instead...
  1307. *
  1308. */
  1309. int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
  1310. {
  1311. struct drbd_bitmap *b = mdev->bitmap;
  1312. int count, s, e;
  1313. unsigned long flags;
  1314. unsigned long *p_addr, *bm;
  1315. ERR_IF(!b) return 0;
  1316. ERR_IF(!b->bm_pages) return 0;
  1317. spin_lock_irqsave(&b->bm_lock, flags);
  1318. if (BM_DONT_TEST & b->bm_flags)
  1319. bm_print_lock_info(mdev);
  1320. s = S2W(enr);
  1321. e = min((size_t)S2W(enr+1), b->bm_words);
  1322. count = 0;
  1323. if (s < b->bm_words) {
  1324. int n = e-s;
  1325. p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
  1326. bm = p_addr + MLPP(s);
  1327. while (n--)
  1328. count += hweight_long(*bm++);
  1329. bm_unmap(p_addr);
  1330. } else {
  1331. dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
  1332. }
  1333. spin_unlock_irqrestore(&b->bm_lock, flags);
  1334. return count;
  1335. }
  1336. /* Set all bits covered by the AL-extent al_enr.
  1337. * Returns number of bits changed. */
  1338. unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
  1339. {
  1340. struct drbd_bitmap *b = mdev->bitmap;
  1341. unsigned long *p_addr, *bm;
  1342. unsigned long weight;
  1343. unsigned long s, e;
  1344. int count, i, do_now;
  1345. ERR_IF(!b) return 0;
  1346. ERR_IF(!b->bm_pages) return 0;
  1347. spin_lock_irq(&b->bm_lock);
  1348. if (BM_DONT_SET & b->bm_flags)
  1349. bm_print_lock_info(mdev);
  1350. weight = b->bm_set;
  1351. s = al_enr * BM_WORDS_PER_AL_EXT;
  1352. e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
  1353. /* assert that s and e are on the same page */
  1354. D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
  1355. == s >> (PAGE_SHIFT - LN2_BPL + 3));
  1356. count = 0;
  1357. if (s < b->bm_words) {
  1358. i = do_now = e-s;
  1359. p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
  1360. bm = p_addr + MLPP(s);
  1361. while (i--) {
  1362. count += hweight_long(*bm);
  1363. *bm = -1UL;
  1364. bm++;
  1365. }
  1366. bm_unmap(p_addr);
  1367. b->bm_set += do_now*BITS_PER_LONG - count;
  1368. if (e == b->bm_words)
  1369. b->bm_set -= bm_clear_surplus(b);
  1370. } else {
  1371. dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
  1372. }
  1373. weight = b->bm_set - weight;
  1374. spin_unlock_irq(&b->bm_lock);
  1375. return weight;
  1376. }