/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
#include "drbd_int.h"

/* OPAQUE outside this file!
 * interface defined in drbd_int.h
 *
 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".
 *
 * Note that since find_first_bit returns an int, at the current granularity
 * of the bitmap (4KB per bit), this implementation "only" supports up to
 * 1<<(32+12) == 16 TB...
 */
/*
 * NOTE
 *  Access to the *bm_pages is protected by bm_lock.
 *  It is safe to read the other members within the lock.
 *
 *  drbd_bm_set_bits is called from bio_endio callbacks, so we may be
 *  called with irqs already disabled and need spin_lock_irqsave().
 *  And we need the kmap_atomic.
 */
struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;
	/* WARNING unsigned long bm_*:
	 * 32bit number of bit offset is just enough for a 512 MB bitmap.
	 * it will blow up if we make the bitmap bigger...
	 * not that it makes much sense to have a bitmap that large,
	 * rather change the granularity to 16k or 64k or something.
	 * (that implies other problems, however...)
	 */
	unsigned long bm_set;	/* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;
	size_t bm_words;
	size_t bm_number_of_pages;
	sector_t bm_dev_capacity;
	struct mutex bm_change;	/* serializes resize operations */

	atomic_t bm_async_io;
	wait_queue_head_t bm_io_wait;

	unsigned long bm_flags;

	/* debugging aid, in case we are still racy somewhere */
	char *bm_why;
	struct task_struct *bm_task;
};
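
/* For scale: at the 4 KiB-per-bit granularity noted above, one byte of
 * bitmap covers 32 KiB of storage, i.e. roughly 32 MiB of bitmap per TiB
 * of backing device. */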

/* definition of bits in bm_flags */
#define BM_LOCKED	0
#define BM_MD_IO_ERROR	1
#define BM_P_VMALLOCED	2

static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
			       unsigned long e, int val, const enum km_type km);

static int bm_is_locked(struct drbd_bitmap *b)
{
	return test_bit(BM_LOCKED, &b->bm_flags);
}

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
		current == mdev->receiver.task ? "receiver" :
		current == mdev->asender.task  ? "asender"  :
		current == mdev->worker.task   ? "worker"   : current->comm,
		func, b->bm_why ?: "?",
		b->bm_task == mdev->receiver.task ? "receiver" :
		b->bm_task == mdev->asender.task  ? "asender"  :
		b->bm_task == mdev->worker.task   ? "worker"   : "?");
}

void drbd_bm_lock(struct drbd_conf *mdev, char *why)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int trylock_failed;

	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
			 current == mdev->receiver.task ? "receiver" :
			 current == mdev->asender.task  ? "asender"  :
			 current == mdev->worker.task   ? "worker"   : current->comm,
			 why, b->bm_why ?: "?",
			 b->bm_task == mdev->receiver.task ? "receiver" :
			 b->bm_task == mdev->asender.task  ? "asender"  :
			 b->bm_task == mdev->worker.task   ? "worker"   : "?");
		mutex_lock(&b->bm_change);
	}
	if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");

	b->bm_why = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_why = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}

/* word offset to long pointer */
static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km)
{
	struct page *page;
	unsigned long page_nr;

	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
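	/* A page holds PAGE_SIZE/sizeof(long) long words, and sizeof(long)
	 * is 1 << (LN2_BPL - 3) bytes, which makes the divide below a shift
	 * by PAGE_SHIFT - LN2_BPL + 3 (e.g. >> 9 for 4 KiB pages and
	 * 64bit longs). */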
	page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	page = b->bm_pages[page_nr];

	return (unsigned long *) kmap_atomic(page, km);
}

static unsigned long *bm_map_paddr(struct drbd_bitmap *b, unsigned long offset)
{
	return __bm_map_paddr(b, offset, KM_IRQ1);
}

static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
{
	kunmap_atomic(p_addr, km);
}

static void bm_unmap(unsigned long *p_addr)
{
	__bm_unmap(p_addr, KM_IRQ1);
}

/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
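/* One 512 byte sector of bitmap holds 512*8 bits, i.e. (512*8) >> LN2_BPL
 * long words; given that one such sector covers exactly one bitmap extent
 * (see drbd_bm_write_sect below), the shift above evaluates to just that:
 * 64 words per bitmap sector on 64bit, 128 on 32bit. */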

/* word offset from start of bitmap to word number _in_page_
 * modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)))
 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
 */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))
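/* With 4 KiB pages and 64bit longs, LWPP == 512, so MLPP masks with 511. */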

/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_conf*, but for the debug macros I like to have the mdev around
 * to be able to report device specific messages.
 */

static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
					  "a NULL pointer; i=%lu n=%lu\n",
					  i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}

/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_KERNEL is ok, as this is done when a lower level disk is
	 * "attached" to the drbd.  Context is receiver thread or cqueue
	 * thread.  As we have no disk yet, we are not in the IO path,
	 * not even the IO path of the peer. */
	bytes = sizeof(struct page *)*want;
	new_pages = kmalloc(bytes, GFP_KERNEL);
	if (!new_pages) {
		new_pages = vmalloc(bytes);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	memset(new_pages, 0, bytes);
	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_HIGHUSER);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	if (vmalloced)
		set_bit(BM_P_VMALLOCED, &b->bm_flags);
	else
		clear_bit(BM_P_VMALLOCED, &b->bm_flags);

	return new_pages;
}

/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	ERR_IF(!mdev->bitmap) return 0;
	return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
	ERR_IF (!mdev->bitmap) return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
}

/*
 * since (b->bm_bits % BITS_PER_LONG) might not be 0,
 * this masks out the remaining (unused) bits.
 * Returns the number of bits cleared.
 */
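/* Example, 64bit: bm_bits == 70 gives mask == (1UL << 6) - 1, so the six
 * valid bits of the last used word survive and everything after is zeroed. */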
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
	size_t w = b->bm_bits >> LN2_BPL;
	int cleared = 0;
	unsigned long *p_addr, *bm;

	p_addr = bm_map_paddr(b, w);
	bm = p_addr + MLPP(w);
	if (w < b->bm_words) {
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		w++; bm++;
	}

	if (w < b->bm_words) {
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}

static void bm_set_surplus(struct drbd_bitmap *b)
{
	const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
	size_t w = b->bm_bits >> LN2_BPL;
	unsigned long *p_addr, *bm;

	p_addr = bm_map_paddr(b, w);
	bm = p_addr + MLPP(w);
	if (w < b->bm_words) {
		*bm |= ~mask;
		bm++; w++;
	}

	if (w < b->bm_words) {
		*bm = ~(0UL);
	}
	bm_unmap(p_addr);
}

static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian)
{
	unsigned long *p_addr, *bm, offset = 0;
	unsigned long bits = 0;
	unsigned long i, do_now;

	while (offset < b->bm_words) {
		i = do_now = min_t(size_t, b->bm_words-offset, LWPP);
		p_addr = __bm_map_paddr(b, offset, KM_USER0);
		bm = p_addr + MLPP(offset);
		while (i--) {
#ifndef __LITTLE_ENDIAN
			if (swap_endian)
				*bm = lel_to_cpu(*bm);
#endif
			bits += hweight_long(*bm++);
		}
		__bm_unmap(p_addr, KM_USER0);
		offset += do_now;
		cond_resched();
	}

	return bits;
}

static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	return __bm_count_bits(b, 0);
}

static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
{
	return __bm_count_bits(b, 1);
}
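
/* The swap_endian variant is used right after reading the on-disk (little
 * endian) bitmap on big endian hosts: converting each word in place while
 * it is mapped anyway saves a second pass over all pages (see bm_rw). */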

/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	size_t do_now, end;

#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		p_addr = bm_map_paddr(b, offset);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
			break; /* never happens unless the chunking above is buggy */
		}
		memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		offset += do_now;
	}
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits, *p_addr, *bm;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	drbd_bm_lock(mdev, "resize");

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set =
		b->bm_bits =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}

	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12));
		put_ldev(mdev);
	}

	/* one extra long to catch off by one errors */
	want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);
	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	p_addr = bm_map_paddr(b, words);
	bm = p_addr + MLPP(words);
	*bm = DRBD_MAGIC;
	bm_unmap(p_addr);

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words);

 out:
	drbd_bm_unlock(mdev);
	return err;
}

/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}

size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;

	return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		       unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		p_addr = bm_map_paddr(b, offset);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | lel_to_cpu(*buffer++);
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long) offset,
			(unsigned long) number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_paddr(b, offset);
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = cpu_to_lel(*bm++);
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

static void bm_async_io_complete(struct bio *bio, int error)
{
	struct drbd_bitmap *b = bio->bi_private;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	if (error) {
		/* doh. what now?
		 * for now, set all bits, and flag MD_IO_ERROR */
		__set_bit(BM_MD_IO_ERROR, &b->bm_flags);
	}
	if (atomic_dec_and_test(&b->bm_async_io))
		wake_up(&b->bm_io_wait);

	bio_put(bio);
}

static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local)
{
	/* we are process context. we always get a bio */
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	unsigned int len;
	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
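	/* page_nr counts whole pages; shifting by PAGE_SHIFT-9 converts
	 * that to 512 byte on-disk sectors. */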
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	bio_add_page(bio, b->bm_pages[page_nr], len, 0);
	bio->bi_private = b;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
	}
}

# if defined(__LITTLE_ENDIAN)
	/* nothing to do, on disk == in memory */
# define bm_cpu_to_lel(x) ((void)0)
# else
static void bm_cpu_to_lel(struct drbd_bitmap *b)
{
	/* need to cpu_to_lel all the pages ...
	 * this may be optimized by using
	 * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0;
	 * the following is still not optimal, but better than nothing */
	unsigned int i;
	unsigned long *p_addr, *bm;

	if (b->bm_set == 0) {
		/* no page at all; avoid swap if all is 0 */
		i = b->bm_number_of_pages;
	} else if (b->bm_set == b->bm_bits) {
		/* only the last page */
		i = b->bm_number_of_pages - 1;
	} else {
		/* all pages */
		i = 0;
	}
	for (; i < b->bm_number_of_pages; i++) {
		p_addr = kmap_atomic(b->bm_pages[i], KM_USER0);
		for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++)
			*bm = cpu_to_lel(*bm);
		kunmap_atomic(p_addr, KM_USER0);
	}
}
# endif
/* lel_to_cpu == cpu_to_lel */
# define bm_lel_to_cpu(x) bm_cpu_to_lel(x)

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int bm_words, num_pages, i;
	unsigned long now;
	char ppb[10];
	int err = 0;

	WARN_ON(!bm_is_locked(b));

	/* no spinlock here, the drbd_bm_lock should be enough! */

	bm_words  = drbd_bm_words(mdev);
	num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT;

	/* on disk bitmap is little endian */
	if (rw == WRITE)
		bm_cpu_to_lel(b);

	now = jiffies;
	atomic_set(&b->bm_async_io, num_pages);
	__clear_bit(BM_MD_IO_ERROR, &b->bm_flags);

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++)
		bm_page_io_async(mdev, b, i, rw);

	wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);

	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, TRUE);
		err = -EIO;
	}

	now = jiffies;
	if (rw == WRITE) {
		/* swap back endianness */
		bm_lel_to_cpu(b);
		/* flush bitmap to stable storage */
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		/* just read, if necessary adjust endianness */
		b->bm_set = bm_count_bits_swap_endian(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
			 jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
		 ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE);
}

/**
 * drbd_bm_write_sect() - Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap
 * @mdev:	DRBD device.
 * @enr:	Extent number in the resync lru (happens to be sector offset).
 *
 * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
 * by a single sector write.  Therefore enr == sector offset from the
 * start of the bitmap.
 */
int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
{
	sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
				      + mdev->ldev->md.bm_offset;
	int bm_words, num_words, offset;
	int err = 0;

	mutex_lock(&mdev->md_io_mutex);
	bm_words  = drbd_bm_words(mdev);
	offset    = S2W(enr);	/* word offset into bitmap */
	num_words = min(S2W(1), bm_words - offset);
	if (num_words < S2W(1))
		memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
	drbd_bm_get_lel(mdev, offset, num_words,
			page_address(mdev->md_io_page));
	if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) {
		int i;
		err = -EIO;
		dev_err(DEV, "IO ERROR writing bitmap sector %lu "
		    "(meta-disk sector %llus)\n",
		    enr, (unsigned long long)on_disk_sector);
		drbd_chk_io_error(mdev, 1, TRUE);
		for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
			drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
	}
	mdev->bm_writ_cnt++;
	mutex_unlock(&mdev->md_io_mutex);
	return err;
}

/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * should not make much difference anyway, but ...
 *
 * this returns a bit number, NOT a sector!
 */
#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1)
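/* PAGE_SHIFT+3 is log2 of the bits per page: with 4 KiB pages that is
 * 32768 bits, so BPP_MASK == 0x7fff reduces a bit number to its position
 * within one page. */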
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = -1UL;
	unsigned long *p_addr;
	unsigned long bit_offset; /* bit offset of the mapped page. */

	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
	} else {
		while (bm_fo < b->bm_bits) {
			unsigned long offset;
			bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */
			offset = bit_offset >> LN2_BPL;	/* word offset of the page */
			p_addr = __bm_map_paddr(b, offset, km);

			if (find_zero_bit)
				i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
			else
				i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				i = bit_offset + i;
				if (i >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		i = -1UL;
	}
 found:
	return i;
}

static unsigned long bm_find_next(struct drbd_conf *mdev,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = -1UL;

	ERR_IF(!b) return i;
	ERR_IF(!b->bm_pages) return i;

	spin_lock_irq(&b->bm_lock);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!bm_is_locked(mdev)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!bm_is_locked(mdev)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned long last_page_nr = -1UL;
	int c = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits - 1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned long offset = bitnr>>LN2_BPL;
		unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr, km);
			p_addr = __bm_map_paddr(b, offset, km);
			last_page_nr = page_nr;
		}
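		/* __test_and_set_bit / __test_and_clear_bit return the
		 * previous bit value, so c only counts real 0 -> 1
		 * (respectively 1 -> 0) transitions. */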
		if (val)
			c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr, km);
	b->bm_set += c;
	return c;
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	int c = 0;

	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);

	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		b->bm_set += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr, KM_USER0);
}

/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyway.
	 */
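	/* Example, 64bit: s == 5, e == 1000 gives sl == 64 and el == 960;
	 * bits 5..63 and 960..1000 are set one by one, bits 64..959 as
	 * fourteen full words. */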
	unsigned long sl = ALIGN(s, BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		cond_resched();
		first_word = 0;
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that was even the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
}

/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	int i;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);
	if (bitnr < b->bm_bits) {
		unsigned long offset = bitnr>>LN2_BPL;
		p_addr = bm_map_paddr(b, offset);
		i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL, page_nr = -1;
	unsigned long bitnr;
	int c = 0;
	size_t w;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);
	for (bitnr = s; bitnr <= e; bitnr++) {
		w = bitnr >> LN2_BPL;
		if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) {
			page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3);
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_paddr(b, w);
		}
		ERR_IF (bitnr >= b->bm_bits) {
			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
		} else {
			c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		}
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only used during a cstate when bits are
 * only cleared, not set, and we typically only care for the case when the
 * return value is zero, or we already "locked" this "bitmap extent" by
 * other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_paddr(b, s);
		bm = p_addr + MLPP(s);
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}

/* set all bits covered by the AL-extent al_enr */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;
	int count, s, e, i, do_now;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irq(&b->bm_lock);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);
	weight = b->bm_set;

	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_paddr(b, s);
		bm = p_addr + MLPP(s);
		while (i--) {
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s);
	}
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}