@@ -140,11 +140,110 @@ static inline int apic_enabled(struct kvm_lapic *apic)
140
140
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
141
141
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
142
142
143
/*
 * Return non-zero when this vCPU's APIC base MSR has the x2APIC
 * enable bit (X2APIC_ENABLE) set, i.e. the APIC is in x2APIC mode.
 */
static inline int apic_x2apic_mode(struct kvm_lapic *apic)
{
	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
}
147
+
143
148
static inline int kvm_apic_id (struct kvm_lapic * apic )
144
149
{
145
150
return (kvm_apic_get_reg (apic , APIC_ID ) >> 24 ) & 0xff ;
146
151
}
147
152
153
+ static inline u16 apic_cluster_id (struct kvm_apic_map * map , u32 ldr )
154
+ {
155
+ u16 cid ;
156
+ ldr >>= 32 - map -> ldr_bits ;
157
+ cid = (ldr >> map -> cid_shift ) & map -> cid_mask ;
158
+
159
+ BUG_ON (cid >= ARRAY_SIZE (map -> logical_map ));
160
+
161
+ return cid ;
162
+ }
163
+
164
+ static inline u16 apic_logical_id (struct kvm_apic_map * map , u32 ldr )
165
+ {
166
+ ldr >>= (32 - map -> ldr_bits );
167
+ return ldr & map -> lid_mask ;
168
+ }
169
+
170
/*
 * Rebuild kvm->arch.apic_map, the RCU-protected table mapping physical
 * and logical APIC IDs to kvm_lapic structures.  Must be called whenever
 * anything the map depends on changes (APIC ID, LDR, DFR, APIC base).
 * The previous map is released after an RCU grace period so lockless
 * readers (the fast delivery path) stay safe.
 */
static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;

	new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);

	/* apic_map_lock serializes map writers; readers use RCU only. */
	mutex_lock(&kvm->arch.apic_map_lock);

	/* On allocation failure a NULL map is published below. */
	if (!new)
		goto out;

	new->ldr_bits = 8;
	/* flat mode is default */
	new->cid_shift = 8;
	new->cid_mask = 0;
	new->lid_mask = 0xff;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		u16 cid, lid;
		u32 ldr;

		if (!kvm_apic_present(vcpu))
			continue;

		/*
		 * All APICs have to be configured in the same mode by an OS.
		 * We take advantage of this while building logical id lookup
		 * table. After reset APICs are in xapic/flat mode, so if we
		 * find apic with different setting we assume this is the mode
		 * OS wants all apics to be in; build lookup table accordingly.
		 */
		if (apic_x2apic_mode(apic)) {
			new->ldr_bits = 32;
			new->cid_shift = 16;
			new->cid_mask = new->lid_mask = 0xffff;
		} else if (kvm_apic_sw_enabled(apic) &&
				!new->cid_mask /* flat mode */ &&
				kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
			new->cid_shift = 4;
			new->cid_mask = 0xf;
			new->lid_mask = 0xf;
		}

		new->phys_map[kvm_apic_id(apic)] = apic;

		ldr = kvm_apic_get_reg(apic, APIC_LDR);
		cid = apic_cluster_id(new, ldr);
		lid = apic_logical_id(new, ldr);

		/* lid == 0 means no logical destination bit is set. */
		if (lid)
			new->logical_map[cid][ffs(lid) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	/* Publish the new map before dropping the lock. */
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		kfree_rcu(old, rcu);
}
234
+
235
+ static inline void kvm_apic_set_id (struct kvm_lapic * apic , u8 id )
236
+ {
237
+ apic_set_reg (apic , APIC_ID , id << 24 );
238
+ recalculate_apic_map (apic -> vcpu -> kvm );
239
+ }
240
+
241
+ static inline void kvm_apic_set_ldr (struct kvm_lapic * apic , u32 id )
242
+ {
243
+ apic_set_reg (apic , APIC_LDR , id );
244
+ recalculate_apic_map (apic -> vcpu -> kvm );
245
+ }
246
+
148
247
static inline int apic_lvt_enabled (struct kvm_lapic * apic , int lvt_type )
149
248
{
150
249
return !(kvm_apic_get_reg (apic , lvt_type ) & APIC_LVT_MASKED );
@@ -194,11 +293,6 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
194
293
apic_set_reg (apic , APIC_LVR , v );
195
294
}
196
295
197
- static inline int apic_x2apic_mode (struct kvm_lapic * apic )
198
- {
199
- return apic -> vcpu -> arch .apic_base & X2APIC_ENABLE ;
200
- }
201
-
202
296
static const unsigned int apic_lvt_mask [APIC_LVT_NUM ] = {
203
297
LVT_MASK , /* part LVTT mask, timer mode mask added at runtime */
204
298
LVT_MASK | APIC_MODE_MASK , /* LVTTHMR */
@@ -483,6 +577,72 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
483
577
return result ;
484
578
}
485
579
580
/*
 * Try to deliver @irq using the RCU-protected apic_map lookup table
 * instead of scanning every vCPU.
 *
 * Returns true when delivery was fully handled here; *r is then the
 * number of vCPUs the interrupt was injected into, or -1 when no target
 * matched.  Returns false when the caller must fall back to the slow
 * path: no map built yet, an unsupported shorthand, or lowest-priority/
 * broadcast (0xff) destinations in physical mode.
 */
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r)
{
	struct kvm_apic_map *map;
	unsigned long bitmap = 1;	/* single target by default (physical) */
	struct kvm_lapic **dst;
	int i;
	bool ret = false;

	*r = -1;

	/* Self-IPI needs no lookup at all. */
	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq);
		return true;
	}

	/* Other shorthands (all / all-but-self) go through the slow path. */
	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (!map)
		goto out;

	if (irq->dest_mode == 0) { /* physical mode */
		/* Lowest-priority and broadcast: punt to the slow path. */
		if (irq->delivery_mode == APIC_DM_LOWEST ||
				irq->dest_id == 0xff)
			goto out;
		dst = &map->phys_map[irq->dest_id & 0xff];
	} else {
		/* Logical mode: rebuild an LDR-shaped value from dest_id. */
		u32 mda = irq->dest_id << (32 - map->ldr_bits);

		dst = map->logical_map[apic_cluster_id(map, mda)];

		bitmap = apic_logical_id(map, mda);

		if (irq->delivery_mode == APIC_DM_LOWEST) {
			/* Arbitrate: keep only the lowest-priority target. */
			int l = -1;
			for_each_set_bit(i, &bitmap, 16) {
				if (!dst[i])
					continue;
				if (l < 0)
					l = i;
				else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
					l = i;
			}

			bitmap = (l >= 0) ? 1 << l : 0;
		}
	}

	/* Inject into every selected target, counting successes in *r. */
	for_each_set_bit(i, &bitmap, 16) {
		if (!dst[i])
			continue;
		if (*r < 0)
			*r = 0;
		*r += kvm_apic_set_irq(dst[i]->vcpu, irq);
	}

	ret = true;
out:
	rcu_read_unlock();
	return ret;
}
645
+
486
646
/*
487
647
* Add a pending IRQ into lapic.
488
648
* Return 1 if successfully added and 0 if discarded.
@@ -886,7 +1046,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
886
1046
switch (reg ) {
887
1047
case APIC_ID : /* Local APIC ID */
888
1048
if (!apic_x2apic_mode (apic ))
889
- apic_set_reg (apic , APIC_ID , val );
1049
+ kvm_apic_set_id (apic , val >> 24 );
890
1050
else
891
1051
ret = 1 ;
892
1052
break ;
@@ -902,15 +1062,16 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
902
1062
903
1063
case APIC_LDR :
904
1064
if (!apic_x2apic_mode (apic ))
905
- apic_set_reg (apic , APIC_LDR , val & APIC_LDR_MASK );
1065
+ kvm_apic_set_ldr (apic , val & APIC_LDR_MASK );
906
1066
else
907
1067
ret = 1 ;
908
1068
break ;
909
1069
910
1070
case APIC_DFR :
911
- if (!apic_x2apic_mode (apic ))
1071
+ if (!apic_x2apic_mode (apic )) {
912
1072
apic_set_reg (apic , APIC_DFR , val | 0x0FFFFFFF );
913
- else
1073
+ recalculate_apic_map (apic -> vcpu -> kvm );
1074
+ } else
914
1075
ret = 1 ;
915
1076
break ;
916
1077
@@ -1141,6 +1302,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1141
1302
static_key_slow_dec_deferred (& apic_hw_disabled );
1142
1303
else
1143
1304
static_key_slow_inc (& apic_hw_disabled .key );
1305
+ recalculate_apic_map (vcpu -> kvm );
1144
1306
}
1145
1307
1146
1308
if (!kvm_vcpu_is_bsp (apic -> vcpu ))
@@ -1150,7 +1312,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1150
1312
if (apic_x2apic_mode (apic )) {
1151
1313
u32 id = kvm_apic_id (apic );
1152
1314
u32 ldr = ((id & ~0xf ) << 16 ) | (1 << (id & 0xf ));
1153
- apic_set_reg (apic , APIC_LDR , ldr );
1315
+ kvm_apic_set_ldr (apic , ldr );
1154
1316
}
1155
1317
apic -> base_address = apic -> vcpu -> arch .apic_base &
1156
1318
MSR_IA32_APICBASE_BASE ;
@@ -1175,7 +1337,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
1175
1337
/* Stop the timer in case it's a reset to an active apic */
1176
1338
hrtimer_cancel (& apic -> lapic_timer .timer );
1177
1339
1178
- apic_set_reg (apic , APIC_ID , vcpu -> vcpu_id << 24 );
1340
+ kvm_apic_set_id (apic , vcpu -> vcpu_id );
1179
1341
kvm_apic_set_version (apic -> vcpu );
1180
1342
1181
1343
for (i = 0 ; i < APIC_LVT_NUM ; i ++ )
@@ -1186,7 +1348,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
1186
1348
apic_set_reg (apic , APIC_DFR , 0xffffffffU );
1187
1349
apic_set_spiv (apic , 0xff );
1188
1350
apic_set_reg (apic , APIC_TASKPRI , 0 );
1189
- apic_set_reg (apic , APIC_LDR , 0 );
1351
+ kvm_apic_set_ldr (apic , 0 );
1190
1352
apic_set_reg (apic , APIC_ESR , 0 );
1191
1353
apic_set_reg (apic , APIC_ICR , 0 );
1192
1354
apic_set_reg (apic , APIC_ICR2 , 0 );
@@ -1404,6 +1566,8 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
1404
1566
/* set SPIV separately to get count of SW disabled APICs right */
1405
1567
apic_set_spiv (apic , * ((u32 * )(s -> regs + APIC_SPIV )));
1406
1568
memcpy (vcpu -> arch .apic -> regs , s -> regs , sizeof * s );
1569
+ /* call kvm_apic_set_id() to put apic into apic_map */
1570
+ kvm_apic_set_id (apic , kvm_apic_id (apic ));
1407
1571
kvm_apic_set_version (vcpu );
1408
1572
1409
1573
apic_update_ppr (apic );
0 commit comments