@@ -648,7 +648,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -768,7 +768,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -780,7 +780,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1186,10 +1186,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!generic_test_le_bit(offset, p))
+			generic___set_le_bit(offset, p);
 	}
 }
 
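Note: kvm_dirty_bitmap_bytes() itself is introduced outside the hunks shown here. Judging by the open-coded expression it replaces in both call sites, ALIGN(npages, BITS_PER_LONG) / 8, the helper presumably amounts to the sketch below; its exact placement (e.g. as a static inline next to struct kvm_memory_slot) is an assumption on my part.

/* Sketch, not the verbatim definition: byte length of a slot's dirty
 * bitmap, with npages rounded up so the bitmap is a whole number of longs.
 */
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

A helper returning unsigned long also explains why n in kvm_get_dirty_log() is widened from int above: the byte count of a very large slot's bitmap should not be squeezed through a signed 32-bit type.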
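The mark_page_dirty() hunk stops feeding rel_gfn straight into the generic little-endian bitops, whose bit-index parameter is an int; instead it resolves the containing long with unsigned long arithmetic and passes only the in-word offset, which is always below BITS_PER_LONG. A minimal standalone illustration of that split follows; the sample rel_gfn value is made up, chosen only so that it exceeds INT_MAX.

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

int main(void)
{
	/* Hypothetical page offset within a huge slot; too big for int. */
	unsigned long rel_gfn = 0x90000003UL;

	/* Word-granular addressing happens in unsigned long arithmetic... */
	unsigned long word_index = rel_gfn / BITS_PER_LONG;

	/* ...so the int the bitops receive is just the safe in-word offset. */
	int offset = rel_gfn % BITS_PER_LONG;

	printf("long #%lu, bit %d\n", word_index, offset);
	return 0;
}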