@@ -656,14 +656,25 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	return 0;
 }
 
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+	int offset = 0;
+
+	WARN_ON(sp->role.level != 1);
+
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
-	int need_flush = 0;
 
 	vcpu_clear_mmio_info(vcpu, gva);
 
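A note on the quadrant arithmetic in the new helper (an illustration, not part of the patch): when PTTYPE == 32, a guest page table packs 1024 4-byte entries into one page, but a shadow page holds only 512 8-byte sptes, so each shadow page covers half of the guest table and sp->role.quadrant selects which half. A standalone sketch of the offset math, assuming PT64_LEVEL_BITS is 9 and pt_element_t is the 4-byte guest entry type:

#include <stdio.h>
#include <stdint.h>

#define PT64_LEVEL_BITS 9	/* 512 sptes per shadow page */
typedef uint32_t pt_element_t;	/* 4-byte guest pte when PTTYPE == 32 */

int main(void)
{
	unsigned int quadrant;

	for (quadrant = 0; quadrant < 2; quadrant++) {
		/* same computation as FNAME(get_level1_sp_gpa) */
		unsigned int offset = quadrant << PT64_LEVEL_BITS;
		printf("quadrant %u -> +%zu bytes into the guest page table\n",
		       quadrant, offset * sizeof(pt_element_t));
	}
	return 0;	/* prints +0 and +2048: each quadrant shadows half the 4K table */
}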
@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			int offset, shift;
-
 			if (!sp->unsync)
 				break;
 
-			shift = PAGE_SHIFT -
-				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-			offset = sp->role.quadrant << shift;
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-			if (is_shadow_present_pte(*sptep)) {
-				if (is_large_pte(*sptep))
-					--vcpu->kvm->stat.lpages;
-				drop_spte(vcpu->kvm, sptep);
-				need_flush = 1;
-			} else if (is_mmio_spte(*sptep))
-				mmu_spte_clear_no_track(sptep);
-
-			break;
+			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+				kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
-	if (need_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
 	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
 
 	spin_unlock(&vcpu->kvm->mmu_lock);
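The block removed above open-coded what the shared mmu_page_zap_pte helper in mmu.c does for a last-level spte: drop it, keep the large-page statistics straight, and clear cached mmio sptes without tracking. Because the helper reports whether a present spte was actually dropped, the flush moves inline and the function-wide need_flush flag disappears. A sketch of that last-level behaviour, reassembled from the deleted lines rather than copied from mmu.c:

/* Sketch only, reconstructed from the hunk above; returns true when
 * the caller must flush remote TLBs. */
static bool zap_last_level_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_shadow_present_pte(*sptep)) {
		if (is_large_pte(*sptep))
			--kvm->stat.lpages;	/* huge-page accounting */
		drop_spte(kvm, sptep);
		return true;			/* a live translation went away */
	}
	if (is_mmio_spte(*sptep))
		mmu_spte_clear_no_track(sptep);	/* cached mmio pte: no flush */
	return false;
}

Note also that the explicit break could go away because drop_spte clears the entry, so the walk still terminates through the !is_shadow_present_pte(*sptep) check that follows.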
@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	int i, offset, nr_present;
+	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 
-	offset = nr_present = 0;
-
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
 
-	if (PTTYPE == 32)
-		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		unsigned pte_access;
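With this hunk, invlpg and sync_page derive a guest pte's address through the same helper, and the WARN_ON(sp->role.level != 1) documents the shared contract that only last-level shadow pages qualify. Condensed from the hunks above (the i-indexed form is an assumption about the sync_page loop body, which the diff truncates):

/* invlpg: the one guest pte shadowed by sptep */
pte_gpa = FNAME(get_level1_sp_gpa)(sp);
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

/* sync_page: presumably entry i, relative to the same base */
pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);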