@@ -128,10 +128,6 @@ static ssize_t __cstate_##_var##_show(struct device *dev,	\
 static struct device_attribute format_attr_##_var =		\
 	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
 
-static ssize_t cstate_get_attr_cpumask(struct device *dev,
-				       struct device_attribute *attr,
-				       char *buf);
-
 /* Model -> events mapping */
 struct cstate_model {
 	unsigned long		core_events;
@@ -206,22 +202,9 @@ static struct attribute_group cstate_format_attr_group = {
 	.attrs = cstate_format_attrs,
 };
 
-static cpumask_t cstate_core_cpu_mask;
-static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
-
-static struct attribute *cstate_cpumask_attrs[] = {
-	&dev_attr_cpumask.attr,
-	NULL,
-};
-
-static struct attribute_group cpumask_attr_group = {
-	.attrs = cstate_cpumask_attrs,
-};
-
 static const struct attribute_group *cstate_attr_groups[] = {
 	&cstate_events_attr_group,
 	&cstate_format_attr_group,
-	&cpumask_attr_group,
 	NULL,
 };
 
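With the cpumask attribute gone, the driver no longer publishes or maintains a reader mask itself; declaring a scope is enough, and the generic perf core tracks the designated reader per scope instance. A minimal sketch of the resulting declaration pattern, assuming a kernel with PMU scope support (the name my_msr_pmu and its callbacks are hypothetical, not from this patch):

    #include <linux/module.h>
    #include <linux/perf_event.h>

    static int  my_event_init(struct perf_event *event);
    static int  my_event_add(struct perf_event *event, int mode);
    static void my_event_del(struct perf_event *event, int mode);
    static void my_event_read(struct perf_event *event);

    static struct pmu my_msr_pmu = {
    	.task_ctx_nr	= perf_invalid_context,
    	.name		= "my_msr_pmu",
    	.event_init	= my_event_init,
    	.add		= my_event_add,
    	.del		= my_event_del,
    	.read		= my_event_read,
    	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
    	.scope		= PERF_PMU_SCOPE_PKG,	/* perf core picks and migrates the reader CPU */
    	.module		= THIS_MODULE,
    };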
@@ -269,8 +252,6 @@ static struct perf_msr pkg_msr[] = {
 	[PERF_CSTATE_PKG_C10_RES]	= { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
 };
 
-static cpumask_t cstate_pkg_cpu_mask;
-
 /* cstate_module PMU */
 static struct pmu cstate_module_pmu;
 static bool has_cstate_module;
@@ -291,28 +272,9 @@ static struct perf_msr module_msr[] = {
 	[PERF_CSTATE_MODULE_C6_RES]	= { MSR_MODULE_C6_RES_MS,	&group_cstate_module_c6,	test_msr },
 };
 
-static cpumask_t cstate_module_cpu_mask;
-
-static ssize_t cstate_get_attr_cpumask(struct device *dev,
-				       struct device_attribute *attr,
-				       char *buf)
-{
-	struct pmu *pmu = dev_get_drvdata(dev);
-
-	if (pmu == &cstate_core_pmu)
-		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
-	else if (pmu == &cstate_pkg_pmu)
-		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
-	else if (pmu == &cstate_module_pmu)
-		return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask);
-	else
-		return 0;
-}
-
 static int cstate_pmu_event_init(struct perf_event *event)
 {
 	u64 cfg = event->attr.config;
-	int cpu;
 
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
@@ -331,37 +293,24 @@ static int cstate_pmu_event_init(struct perf_event *event)
 		if (!(core_msr_mask & (1 << cfg)))
 			return -EINVAL;
 		event->hw.event_base = core_msr[cfg].msr;
-		cpu = cpumask_any_and(&cstate_core_cpu_mask,
-				      topology_sibling_cpumask(event->cpu));
 	} else if (event->pmu == &cstate_pkg_pmu) {
 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
 			return -EINVAL;
 		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
 		if (!(pkg_msr_mask & (1 << cfg)))
 			return -EINVAL;
-
-		event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
-
 		event->hw.event_base = pkg_msr[cfg].msr;
-		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
-				      topology_die_cpumask(event->cpu));
 	} else if (event->pmu == &cstate_module_pmu) {
 		if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX)
 			return -EINVAL;
 		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_MODULE_EVENT_MAX);
 		if (!(module_msr_mask & (1 << cfg)))
 			return -EINVAL;
 		event->hw.event_base = module_msr[cfg].msr;
-		cpu = cpumask_any_and(&cstate_module_cpu_mask,
-				      topology_cluster_cpumask(event->cpu));
 	} else {
 		return -ENOENT;
 	}
 
-	if (cpu >= nr_cpu_ids)
-		return -ENODEV;
-
-	event->cpu = cpu;
 	event->hw.config = cfg;
 	event->hw.idx = -1;
 	return 0;
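Note that event_init no longer rewrites event->cpu: userspace can target any CPU in the scope and the perf core routes counting to the designated reader. A hedged userspace sketch, not part of the patch (the PMU type and event encoding must be read from sysfs, not hard-coded):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Count a cstate_pkg event on CPU 0, system-wide on that CPU. */
    static int open_pkg_cstate_event(int pmu_type, unsigned long long config)
    {
    	struct perf_event_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.size   = sizeof(attr);
    	attr.type   = pmu_type;	/* /sys/bus/event_source/devices/cstate_pkg/type */
    	attr.config = config;	/* event=N from .../cstate_pkg/events/ */

    	/* pid = -1, cpu = 0: per-CPU counting on CPU 0 */
    	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
    }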
@@ -412,84 +361,6 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode)
 	return 0;
 }
 
-/*
- * Check if exiting cpu is the designated reader. If so migrate the
- * events when there is a valid target available
- */
-static int cstate_cpu_exit(unsigned int cpu)
-{
-	unsigned int target;
-
-	if (has_cstate_core &&
-	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {
-
-		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
-		/* Migrate events if there is a valid target */
-		if (target < nr_cpu_ids) {
-			cpumask_set_cpu(target, &cstate_core_cpu_mask);
-			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
-		}
-	}
-
-	if (has_cstate_pkg &&
-	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {
-
-		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
-		/* Migrate events if there is a valid target */
-		if (target < nr_cpu_ids) {
-			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
-			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
-		}
-	}
-
-	if (has_cstate_module &&
-	    cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) {
-
-		target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu);
-		/* Migrate events if there is a valid target */
-		if (target < nr_cpu_ids) {
-			cpumask_set_cpu(target, &cstate_module_cpu_mask);
-			perf_pmu_migrate_context(&cstate_module_pmu, cpu, target);
-		}
-	}
-	return 0;
-}
-
-static int cstate_cpu_init(unsigned int cpu)
-{
-	unsigned int target;
-
-	/*
-	 * If this is the first online thread of that core, set it in
-	 * the core cpu mask as the designated reader.
-	 */
-	target = cpumask_any_and(&cstate_core_cpu_mask,
-				 topology_sibling_cpumask(cpu));
-
-	if (has_cstate_core && target >= nr_cpu_ids)
-		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
-
-	/*
-	 * If this is the first online thread of that package, set it
-	 * in the package cpu mask as the designated reader.
-	 */
-	target = cpumask_any_and(&cstate_pkg_cpu_mask,
-				 topology_die_cpumask(cpu));
-	if (has_cstate_pkg && target >= nr_cpu_ids)
-		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
-
-	/*
-	 * If this is the first online thread of that cluster, set it
-	 * in the cluster cpu mask as the designated reader.
-	 */
-	target = cpumask_any_and(&cstate_module_cpu_mask,
-				 topology_cluster_cpumask(cpu));
-	if (has_cstate_module && target >= nr_cpu_ids)
-		cpumask_set_cpu(cpu, &cstate_module_cpu_mask);
-
-	return 0;
-}
-
 static const struct attribute_group *core_attr_update[] = {
 	&group_cstate_core_c1,
 	&group_cstate_core_c3,
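The two deleted hotplug callbacks repeated one find-and-migrate idiom per scope. For illustration only, the idiom condenses to a helper like the one below (hypothetical; the generic perf core now performs the equivalent internally for scoped PMUs):

    #include <linux/cpumask.h>
    #include <linux/perf_event.h>

    /*
     * On hotplug-out of @cpu: if it was the designated reader in @mask,
     * hand the role (and the active events) to another CPU in @scope.
     */
    static void migrate_designated_reader(struct pmu *pmu, cpumask_t *mask,
    				      const struct cpumask *scope,
    				      unsigned int cpu)
    {
    	unsigned int target;

    	if (!cpumask_test_and_clear_cpu(cpu, mask))
    		return;

    	target = cpumask_any_but(scope, cpu);
    	if (target < nr_cpu_ids) {
    		cpumask_set_cpu(target, mask);
    		perf_pmu_migrate_context(pmu, cpu, target);
    	}
    }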
@@ -526,6 +397,7 @@ static struct pmu cstate_core_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+	.scope		= PERF_PMU_SCOPE_CORE,
 	.module		= THIS_MODULE,
 };
 
@@ -541,6 +413,7 @@ static struct pmu cstate_pkg_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+	.scope		= PERF_PMU_SCOPE_PKG,
 	.module		= THIS_MODULE,
 };
 
@@ -556,6 +429,7 @@ static struct pmu cstate_module_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+	.scope		= PERF_PMU_SCOPE_CLUSTER,
 	.module		= THIS_MODULE,
 };
 
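The three fixed scopes used above come from the generic enum added alongside this support; include/linux/perf_event.h in kernels that carry it defines roughly:

    enum perf_pmu_scope {
    	PERF_PMU_SCOPE_NONE = 0,
    	PERF_PMU_SCOPE_CORE,		/* SMT siblings of a core */
    	PERF_PMU_SCOPE_DIE,
    	PERF_PMU_SCOPE_CLUSTER,
    	PERF_PMU_SCOPE_PKG,
    	PERF_PMU_SCOPE_SYS_WIDE,
    	PERF_PMU_MAX_SCOPE,
    };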
@@ -810,9 +684,6 @@ static int __init cstate_probe(const struct cstate_model *cm)
 
 static inline void cstate_cleanup(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
-
 	if (has_cstate_core)
 		perf_pmu_unregister(&cstate_core_pmu);
 
@@ -827,11 +698,6 @@ static int __init cstate_init(void)
 {
 	int err;
 
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
-			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
-			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);
-
 	if (has_cstate_core) {
 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
 		if (err) {
@@ -844,6 +710,8 @@ static int __init cstate_init(void)
 
 	if (has_cstate_pkg) {
 		if (topology_max_dies_per_package() > 1) {
+			/* CLX-AP is multi-die and the cstate is die-scope */
+			cstate_pkg_pmu.scope = PERF_PMU_SCOPE_DIE;
 			err = perf_pmu_register(&cstate_pkg_pmu,
 						"cstate_die", -1);
 		} else {
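One wrinkle survives the cleanup: on multi-die parts the package PMU is registered as "cstate_die" with die scope, and as "cstate_pkg" otherwise, so tooling should probe both names. A hedged userspace sketch:

    #include <stdio.h>

    /* Return the perf event type for PMU @name, or -1 if it is absent. */
    static int pmu_type(const char *name)
    {
    	char path[128];
    	int type = -1;
    	FILE *f;

    	snprintf(path, sizeof(path),
    		 "/sys/bus/event_source/devices/%s/type", name);
    	f = fopen(path, "r");
    	if (!f)
    		return -1;
    	if (fscanf(f, "%d", &type) != 1)
    		type = -1;
    	fclose(f);
    	return type;
    }

    /* Usage: type = pmu_type("cstate_die"); if (type < 0) type = pmu_type("cstate_pkg"); */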