|
@@ -1644,16 +1644,14 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
|
|
|
/*
 * Forward declaration: flushes/frees the shadow pages queued on
 * @invalid_list by kvm_mmu_prepare_zap_page().  Defined later in this
 * file; declared here so earlier helpers can call it.
 */
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);
|
|
|
|
|
|
/*
 * Walk every shadow page in the mmu_page_hash bucket for @_gfn and run the
 * caller's body only for entries whose gfn actually matches.
 *
 * The "if (...) {} else" idiom filters non-matching entries while keeping
 * the whole construct usable as a single statement head: the caller's body
 * attaches to the trailing "else", so there is no dangling-else hazard and
 * no extra braces are required at the call site.
 *
 * Arguments are underscore-prefixed to avoid capturing caller identifiers
 * on expansion.
 */
#define for_each_gfn_sp(_kvm, _sp, _gfn)				\
	hlist_for_each_entry(_sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
		if ((_sp)->gfn != (_gfn)) {} else

/*
 * Like for_each_gfn_sp(), but additionally skip direct-mapped and invalid
 * shadow pages, visiting only valid indirect ones.  Built on top of
 * for_each_gfn_sp() rather than open-coding the hash walk, so the bucket
 * lookup logic lives in exactly one place.
 */
#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_gfn_sp(_kvm, _sp, _gfn)				\
		if ((_sp)->role.direct || (_sp)->role.invalid) {} else
|
|
|
|
|
|
/* @sp->gfn should be write-protected at the call site */
|
|
|
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
|