/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>

#include "drbd_int.h"
/* OPAQUE outside this file!
 * interface defined in drbd_int.h
 *
 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".
 */
/*
 * LIMITATIONS:
 * We want to support >= peta byte of backend storage, while for now still using
 * a granularity of one bit per 4KiB of storage.
 * 1 << 50		bytes backend storage (1 PiB)
 * 1 << (50 - 12)	bits needed
 *	38 --> we need u64 to index and count bits
 * 1 << (38 - 3)	bitmap bytes needed
 *	35 --> we still need u64 to index and count bytes
 *			(that's 32 GiB of bitmap for 1 PiB storage)
 * 1 << (35 - 2)	32bit longs needed
 *	33 --> we'd even need u64 to index and count 32bit long words.
 * 1 << (35 - 3)	64bit longs needed
 *	32 --> we could get away with a 32bit unsigned int to index and count
 *	64bit long words, but I rather stay with unsigned long for now.
 *	We probably should neither count nor point to bytes or long words
 *	directly, but either by bitnumber, or by page index and offset.
 * 1 << (35 - 12)
 *	23 --> we need that many 4KiB pages of bitmap.
 * 1 << (23 + 3) --> on a 64bit arch,
 *	we need 64 MiB to store the array of page pointers.
 *
 * Because I'm lazy, and because the resulting patch was too large, too ugly
 * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
 * (1 << 32) bits * 4k storage.
 *
 * bitmap storage and IO:
 *	Bitmap is stored little endian on disk, and is kept little endian in
 *	core memory. Currently we still hold the full bitmap in core as long
 *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
 *	seems excessive.
 *
 *	We plan to reduce the amount of in-core bitmap pages by paging them in
 *	and out against their on-disk location as necessary, but need to make
 *	sure we don't cause too much meta data IO, and must not deadlock in
 *	tight memory situations. This needs some more work.
 */
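
/* Illustrative only: a throwaway sketch (never compiled, not part of the
 * driver) restating the sizing arithmetic above for 1 PiB of backend
 * storage; the function and variable names here are hypothetical. */
#if 0
static void bm_sizing_sketch(void)
{
	u64 storage_bytes = 1ULL << 50;		/* 1 PiB backend storage */
	u64 bits  = storage_bytes >> 12;	/* one bit per 4KiB: 1 << 38 */
	u64 bytes = bits >> 3;			/* 8 bits per byte: 1 << 35, 32 GiB */
	u64 pages = bytes >> 12;		/* 4KiB bitmap pages: 1 << 23 */
	u64 ptr_array_bytes = pages << 3;	/* 64bit pointers: 1 << 26, 64 MiB */
}
#endif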
/*
 * NOTE
 *  Access to the *bm_pages is protected by bm_lock.
 *  It is safe to read the other members within the lock.
 *
 *  drbd_bm_set_bits is called from bio_endio callbacks,
 *  so we may be called with irq already disabled,
 *  so we need spin_lock_irqsave().
 *  And we need the kmap_atomic.
 */
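
/* Illustrative only: the locking pattern the note above implies.  Code that
 * may run with interrupts already disabled (e.g. from bio_endio) must use
 * the irqsave variant.  This is an assumption-level sketch, not a function
 * of this driver. */
#if 0
static void bm_op_sketch(struct drbd_bitmap *b)
{
	unsigned long flags;

	spin_lock_irqsave(&b->bm_lock, flags);
	/* ... touch b->bm_pages / b->bm_set here, mapping pages with
	 * kmap_atomic() only while the lock is held ... */
	spin_unlock_irqrestore(&b->bm_lock, flags);
}
#endif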
struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;
	size_t   bm_words;
	size_t   bm_number_of_pages;
	sector_t bm_dev_capacity;
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;
	struct task_struct *bm_task;
};
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
			       unsigned long e, int val, const enum km_type km);

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
	    current == mdev->receiver.task ? "receiver" :
	    current == mdev->asender.task  ? "asender"  :
	    current == mdev->worker.task   ? "worker"   : current->comm,
	    func, b->bm_why ?: "?",
	    b->bm_task == mdev->receiver.task ? "receiver" :
	    b->bm_task == mdev->asender.task  ? "asender"  :
	    b->bm_task == mdev->worker.task   ? "worker"   : "?");
}
void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int trylock_failed;

	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
			 current == mdev->receiver.task ? "receiver" :
			 current == mdev->asender.task  ? "asender"  :
			 current == mdev->worker.task   ? "worker"   : current->comm,
			 why, b->bm_why ?: "?",
			 b->bm_task == mdev->receiver.task ? "receiver" :
			 b->bm_task == mdev->asender.task  ? "asender"  :
			 b->bm_task == mdev->worker.task   ? "worker"   : "?");
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	b->bm_why  = why;
	b->bm_task = current;
}
void drbd_bm_unlock(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}
/* we store some "meta" info about our pages in page->private */
/* at a granularity of 4k storage per bitmap bit:
 * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
 *  1<<38 bits,
 *  1<<23 4k bitmap pages.
 * Use 24 bits as page index, covers 2 peta byte storage
 * at a granularity of 4k per bit.
 * Used to report the failed page idx on io error from the endio handlers.
 */
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)
/* this page is currently read in, or written back */
#define BM_PAGE_IO_LOCK		31
/* if there has been an IO error for this page */
#define BM_PAGE_IO_ERROR	30
/* this is to be able to intelligently skip disk IO,
 * set if bits have been set since last IO. */
#define BM_PAGE_NEED_WRITEOUT	29
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * we set this if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT	28
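
/* Illustrative only: how the flag bits above and the page index share the
 * single page->private word (a summary derived from the defines, not
 * additional driver state):
 *
 *   bit 31     BM_PAGE_IO_LOCK        page under IO
 *   bit 30     BM_PAGE_IO_ERROR       last IO on this page failed
 *   bit 29     BM_PAGE_NEED_WRITEOUT  bits set since last writeout
 *   bit 28     BM_PAGE_LAZY_WRITEOUT  bits cleared since last writeout
 *   bits 23..0                        page index (BM_PAGE_IDX_MASK)
 */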
/* store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
 * requires it all to be atomic as well. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	page_private(page) |= idx;
}
static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}

/* As it is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}

static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit(BM_PAGE_IO_LOCK, addr);
	smp_mb__after_clear_bit();
	wake_up(&mdev->bitmap->bm_io_wait);
}

/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	/* use cmpxchg? */
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}

static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}

static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
/* on a 32bit box, this would allow for exactly (2<<38) bits. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page, km);
}

static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx, KM_IRQ1);
}

static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
{
	kunmap_atomic(p_addr, km);
}

static void bm_unmap(unsigned long *p_addr)
{
	__bm_unmap(p_addr, KM_IRQ1);
}
/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

/* word offset from start of bitmap to word number _in_page_
 * modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
 */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))
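
/* Illustrative only: on a 64bit arch with 4KiB pages, LWPP is 4096/8 == 512,
 * so bitmap word 1000 lives in page 1 at in-page word offset
 * MLPP(1000) == 1000 & 511 == 488.  A throwaway sketch of that
 * decomposition (names here are hypothetical, not part of this file): */
#if 0
static void bm_offset_sketch(unsigned long word_nr)
{
	unsigned long page_nr = word_nr / LWPP;	/* e.g. 1000 / 512 == 1 */
	unsigned long in_page = MLPP(word_nr);	/* e.g. 1000 & 511 == 488 */
	/* invariant: word_nr == page_nr * LWPP + in_page */
}
#endif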
/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_conf*, but for the debug macros I like to have the mdev around
 * to be able to report device specific messages.
 */

static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
					  "a NULL pointer; i=%lu n=%lu\n",
					  i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}
/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_KERNEL is ok, as this is done when a lower level disk is
	 * "attached" to the drbd.  Context is receiver thread or cqueue
	 * thread.  As we have no disk yet, we are not in the IO path,
	 * not even the IO path of the peer. */
	bytes = sizeof(struct page *)*want;
	new_pages = kmalloc(bytes, GFP_KERNEL);
	if (!new_pages) {
		new_pages = vmalloc(bytes);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	memset(new_pages, 0, bytes);
	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_HIGHUSER);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}
/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	ERR_IF(!mdev->bitmap) return 0;
	return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
	ERR_IF (!mdev->bitmap) return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
}
/*
 * when (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
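
/* Illustrative only: a worked instance of the masking above, with
 * hypothetical numbers on a 64bit arch.  With bm_bits == 100,
 * tmp & BITS_PER_LONG_MASK is 100 % 64 == 36, so mask == (1UL << 36) - 1
 * keeps bits 0..35 of the last used long and clears the surplus
 * bits 36..63: */
#if 0
static void bm_surplus_sketch(void)
{
	unsigned long bm_bits = 100;	/* hypothetical bitmap size in bits */
	unsigned long tmp = bm_bits & BITS_PER_LONG_MASK;	/* == 36 */
	unsigned long mask = (1UL << tmp) - 1;	/* low 36 bits set */
	unsigned long last_word = ~0UL;		/* pretend all bits were set */

	last_word &= mask;	/* surplus bits 36..63 are now cleared */
}
#endif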
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to set all bits
		 * of a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
/* you better not modify the bitmap while this is running,
 * or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx, KM_USER0);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr, KM_USER0);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx, KM_USER0);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr, KM_USER0);
	return bits;
}
/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
/*
 * make sure the bitmap has enough room for the attached storage,
 * and resize if necessary.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);
	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}
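
/* Illustrative only: how capacity maps to bits/words/pages in the function
 * above, for a hypothetical 1 GiB device (2097152 sectors of 512 bytes).
 * One bit covers 4KiB == 8 sectors, so bits == 262144; aligning bits to 64
 * gives words == 4096 longs; 4096 longs * 8 bytes == 32 KiB, which on a
 * 4KiB-page arch is want == 8 bitmap pages. */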
/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}
size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;

	return b->bm_bits;
}
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
}
/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}
/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}
struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;
	struct completion done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
	int error;
};

/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	/* FIXME give back to page pool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		put_page(bio->bi_io_vec[0].bv_page);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight))
		complete(&ctx->done);
}
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	/* we are process context. we always get a bio */
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* FIXME alloc_page is good enough for now, but actually needs
		 * to use pre-allocated page pool */
		void *src, *dest;
		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page, KM_USER0);
		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER1);
		kunmap_atomic(dest, KM_USER0);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
	}
}
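
/* Illustrative only: the sector arithmetic above, with hypothetical numbers.
 * A 4KiB bitmap page spans 1 << (PAGE_SHIFT-9) == 8 sectors of 512 bytes,
 * so bitmap page_nr 5 starts 5*8 == 40 sectors past
 * md_offset + bm_offset on the meta data device. */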
/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
	};
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */
	if (!ctx.flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			if (bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx.in_flight);
		bm_page_io_async(&ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx.in_flight to one to make sure bm_async_io_complete
	 * will not complete() early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 */
	if (!atomic_dec_and_test(&ctx.in_flight))
		wait_for_completion(&ctx.done);
	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			rw == WRITE ? "WRITE" : "READ",
			count, jiffies - now);

	if (ctx.error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, true);
		err = -EIO; /* ctx.error ? */
	}

	now = jiffies;
	if (rw == WRITE) {
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	return err;
}
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0);
}

/**
 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @mdev:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 */
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(mdev, WRITE, upper_idx);
}

/**
 * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
 * @mdev:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 */
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = BM_AIO_COPY_PAGES,
	};

	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	bm_page_io_async(&ctx, idx, WRITE_SYNC);
	wait_for_completion(&ctx.done);

	if (ctx.error)
		drbd_chk_io_error(mdev, 1, true);
		/* that should force detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	return ctx.error;
}
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;

	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}
static unsigned long bm_find_next(struct drbd_conf *mdev,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	ERR_IF(!b) return i;
	ERR_IF(!b->bm_pages) return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
	int c = 0;
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr, km);
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr, km);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr, km);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	int c = 0;

	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(mdev);

	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		b->bm_set += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr, KM_USER0);
}
/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		cond_resched();
		first_word = 0;
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
}
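
/* Illustrative only: a worked instance of the split above, with hypothetical
 * numbers on a 64bit arch.  For s == 70 and e == 300:
 * sl == ALIGN(70,64) == 128 and el == 301 & ~63 == 256, so bits 70..127 and
 * 256..300 go through __bm_change_bits_to() bit by bit, while the two full
 * longs covering bits 128..255 are assigned whole words via
 * bm_set_full_words_within_one_page(). */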
/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	int i;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}
/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		ERR_IF (bitnr >= b->bm_bits) {
			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
		} else {
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		}
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}
/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only used during a cstate when bits are
 * only cleared, not set, and we typically only care for the case when the return
 * value is zero, or we already "locked" this "bitmap extent" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}
/* Set all bits covered by the AL-extent al_enr.
 * Returns number of bits changed. */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;
	unsigned long s, e;
	int count, i, do_now;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_SET & b->bm_flags)
		bm_print_lock_info(mdev);
	weight = b->bm_set;

	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		while (i--) {
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
	}
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}