@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 	return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+	struct kvm *kvm = vcpu->kvm;
 	void *va;
 	unsigned long nb;
+	unsigned long gpa;
 
-	vpap->update_pending = 0;
-	va = NULL;
-	if (vpap->next_gpa) {
-		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-		if (nb < vpap->len) {
-			/*
-			 * If it's now too short, it must be that userspace
-			 * has changed the mappings underlying guest memory,
-			 * so unregister the region.
-			 */
+	/*
+	 * We need to pin the page pointed to by vpap->next_gpa,
+	 * but we can't call kvmppc_pin_guest_page under the lock
+	 * as it does get_user_pages() and down_read().  So we
+	 * have to drop the lock, pin the page, then get the lock
+	 * again and check that a new area didn't get registered
+	 * in the meantime.
+	 */
+	for (;;) {
+		gpa = vpap->next_gpa;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		va = NULL;
+		nb = 0;
+		if (gpa)
+			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		if (gpa == vpap->next_gpa)
+			break;
+		/* sigh... unpin that one and try again */
+		if (va)
 			kvmppc_unpin_guest_page(kvm, va);
-			va = NULL;
-		}
+	}
+
+	vpap->update_pending = 0;
+	if (va && nb < vpap->len) {
+		/*
+		 * If it's now too short, it must be that userspace
+		 * has changed the mappings underlying guest memory,
+		 * so unregister the region.
+		 */
+		kvmppc_unpin_guest_page(kvm, va);
+		va = NULL;
 	}
 	if (vpap->pinned_addr)
 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
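The loop added above follows a common kernel locking pattern: snapshot the target under the spinlock, drop the lock to do work that may sleep (kvmppc_pin_guest_page() ends up in get_user_pages() and down_read()), retake the lock, and retry if the target changed while the lock was not held. As a rough illustration only, here is a minimal userspace sketch of the same pattern using a pthread mutex; the area type and the pin_slow()/unpin() helpers are hypothetical stand-ins, not part of the kernel code:

#include <pthread.h>
#include <stddef.h>

struct area {
	pthread_mutex_t lock;
	unsigned long next_gpa;		/* may be changed by other threads */
	void *pinned;			/* currently pinned mapping, if any */
};

/* Hypothetical stand-ins for kvmppc_pin_guest_page()/kvmppc_unpin_guest_page();
 * the real pin operation can sleep, so it must not run under the lock. */
static void *pin_slow(unsigned long gpa) { return (void *)gpa; }
static void unpin(void *va) { (void)va; }

/* Called with a->lock held; returns with it held. */
static void update_pinned(struct area *a)
{
	unsigned long gpa;
	void *va;

	for (;;) {
		gpa = a->next_gpa;
		pthread_mutex_unlock(&a->lock);
		va = gpa ? pin_slow(gpa) : NULL;	/* blocking work, lock dropped */
		pthread_mutex_lock(&a->lock);
		if (gpa == a->next_gpa)
			break;			/* nothing changed while unlocked */
		if (va)
			unpin(va);		/* stale pin; retry with the new target */
	}
	if (a->pinned)
		unpin(a->pinned);
	a->pinned = va;
}

The key ordering point is that the recheck (gpa == a->next_gpa here, gpa == vpap->next_gpa in the patch) happens only after the lock has been reacquired, so a registration that raced with the pin is noticed and the stale pin is dropped before retrying.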
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
 		vcpu->arch.dtl_index = 0;
 	}
 	if (vcpu->arch.slb_shadow.update_pending)
-		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i;
+	int ptid, i, need_vpa_update;
 
 	/* don't start if any threads have a signal pending */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+	need_vpa_update = 0;
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		if (signal_pending(vcpu->arch.run_task))
 			return 0;
+		need_vpa_update |= vcpu->arch.vpa.update_pending |
+			vcpu->arch.slb_shadow.update_pending |
+			vcpu->arch.dtl.update_pending;
+	}
+
+	/*
+	 * Initialize *vc, in particular vc->vcore_state, so we can
+	 * drop the vcore lock if necessary.
+	 */
+	vc->n_woken = 0;
+	vc->nap_count = 0;
+	vc->entry_exit_count = 0;
+	vc->vcore_state = VCORE_RUNNING;
+	vc->in_guest = 0;
+	vc->napping_threads = 0;
+
+	/*
+	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+	 * which can't be called with any spinlocks held.
+	 */
+	if (need_vpa_update) {
+		spin_unlock(&vc->lock);
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+			kvmppc_update_vpas(vcpu);
+		spin_lock(&vc->lock);
+	}
 
 	/*
 	 * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
 
-	vc->n_woken = 0;
-	vc->nap_count = 0;
-	vc->entry_exit_count = 0;
-	vc->vcore_state = VCORE_RUNNING;
 	vc->stolen_tb += mftb() - vc->preempt_tb;
-	vc->in_guest = 0;
 	vc->pcpu = smp_processor_id();
-	vc->napping_threads = 0;
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
-		if (vcpu->arch.vpa.update_pending ||
-		    vcpu->arch.slb_shadow.update_pending ||
-		    vcpu->arch.dtl.update_pending)
-			kvmppc_update_vpas(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 	/* Grab any remaining hw threads so they can't go into the kernel */
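Taken together, the last two hunks hoist the VPA updates out of the per-thread startup loop: the runnable-threads scan now only collects a need_vpa_update flag while the vcore lock is held, the vcore state is initialized early so the lock can be dropped safely, and the sleeping updates run in a single pass with the lock released. A condensed userspace sketch of that ordering, with hypothetical core/vcpu types standing in for the kernel structures, might look like this:

#include <pthread.h>

#define MAX_VCPUS 8

struct vcpu {
	int update_pending;		/* stands in for the vpa/slb_shadow/dtl flags */
};

struct core {
	pthread_mutex_t lock;
	int state;			/* stands in for vc->vcore_state */
	int nr_vcpus;
	struct vcpu vcpus[MAX_VCPUS];
};

/* Stand-in for kvmppc_update_vpas(); the real one may sleep while pinning pages. */
static void slow_update(struct vcpu *v)
{
	v->update_pending = 0;
}

/* Called with c->lock held; returns with it held (like kvmppc_run_core()). */
static void run_core(struct core *c)
{
	int i, need_update = 0;

	/* First pass: only collect flags, still under the lock. */
	for (i = 0; i < c->nr_vcpus; i++)
		need_update |= c->vcpus[i].update_pending;

	/* Initialize shared state *before* the lock can be dropped. */
	c->state = 1;				/* "RUNNING" */

	if (need_update) {
		pthread_mutex_unlock(&c->lock);
		for (i = 0; i < c->nr_vcpus; i++)
			slow_update(&c->vcpus[i]);	/* blocking work, lock dropped */
		pthread_mutex_lock(&c->lock);
	}

	/* ... start the hardware threads and enter the guest, as before ... */
}

Setting the state before the unlock mirrors the comment added by the patch: once the vcore lock can be dropped, other threads may observe the vcore, so vc->vcore_state and the counters have to hold valid values first.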