
Commit 310c413

profilecreator: add and use Alert function
We used the `logrus` package internally, but minimally, pretty much only to report processing status to the user. This code was the last remaining consumer of that package, which we don't really need.

Add `Alert`, a neutral (neither a log nor a warning) minimal abstraction, similar in spirit to `ghw`, to be used when we need to notify the user about processing status. Make the backend easy to replace, to simplify future integration of this code. We don't expect this to happen, but the extra cost is negligible.

Remove the direct dependency on `logrus`. Note that quite a few indirect dependencies still pull it in.

Signed-off-by: Francesco Romani <[email protected]>
1 parent 7474c5e

37 files changed: +133 −3236 lines

go.mod

Lines changed: 0 additions & 1 deletion
@@ -26,7 +26,6 @@ require (
 	github.com/openshift/library-go v0.0.0-20240419113445-f1541d628746
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.21.1
-	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.9.1
 	github.com/spf13/pflag v1.0.6
 	gopkg.in/fsnotify.v1 v1.4.7

go.sum

Lines changed: 0 additions & 3 deletions
@@ -434,8 +434,6 @@ github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
 github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
@@ -686,7 +684,6 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
+package profilecreator
+
+import (
+	"log"
+	"os"
+)
+
+var alertSink *log.Logger
+
+func init() {
+	// intentionally no log flags, to keep the output minimal
+	alertSink = log.New(os.Stderr, "PPC: ", 0)
+}
+
+// Alert raises status messages. `profilecreator` is meant to run interactively,
+// so the default behavior is to bubble up the messages to the user console.
+// It is however possible to reroute the messages to any log.Logger-compliant backend;
+// see the function SetAlertSink. All Alerts are considered Info messages.
+func Alert(format string, values ...any) {
+	alertSink.Printf(format, values...)
+}
+
+// SetAlertSink replaces the Alert backend; call it before any other function in the `profilecreator` package.
+func SetAlertSink(lh *log.Logger) {
+	alertSink = lh
+}
+
+func GetAlertSink() *log.Logger {
+	return alertSink
+}
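
The new hook makes it straightforward for an integrator to capture or reroute status messages. A minimal usage sketch, assuming the module path `github.com/openshift/cluster-node-tuning-operator` (inferred from the file paths in this commit) and a hypothetical `my-tool: ` prefix; here Alerts are collected in a buffer instead of going to stderr:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	// Module path assumed from the file paths in this commit; adjust as needed.
	"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/profilecreator"
)

func main() {
	// Reroute Alerts into a buffer instead of the default stderr sink.
	// Per the doc comment, call SetAlertSink before anything else in the package.
	var buf bytes.Buffer
	profilecreator.SetAlertSink(log.New(&buf, "my-tool: ", 0))

	profilecreator.Alert("NUMA cell(s): %d", 2)

	fmt.Print(buf.String()) // prints: my-tool: NUMA cell(s): 2
}
```

Note that `SetAlertSink` swaps the package-level sink with a plain assignment, no locking, which is another reason the doc comment asks for it to be called before anything else in the package.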

pkg/performanceprofile/profilecreator/cmd/info.go

Lines changed: 3 additions & 4 deletions
@@ -7,7 +7,6 @@ import (
 	"sort"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
 
@@ -95,13 +94,13 @@ func makeClusterInfoFromClusterData(cluster ClusterData) ClusterInfo {
 	for _, handle := range nodeHandlers {
 		topology, err := handle.SortedTopology()
 		if err != nil {
-			log.Infof("%s(Topology discovery error: %v)", handle.Node.GetName(), err)
+			Alert("%s(Topology discovery error: %v)", handle.Node.GetName(), err)
 			continue
 		}
 
 		htEnabled, err := handle.IsHyperthreadingEnabled()
 		if err != nil {
-			log.Infof("%s(HT discovery error: %v)", handle.Node.GetName(), err)
+			Alert("%s(HT discovery error: %v)", handle.Node.GetName(), err)
 		}
 
 		nInfo := NodeInfo{
@@ -136,7 +135,7 @@ func showClusterInfo(cInfo ClusterInfo, infoOpts *infoOptions) error {
 		fmt.Print(textInfo)
 		return nil
 	}
-	log.Infof("Cluster info:\n%s", textInfo)
+	Alert("Cluster info:\n%s", textInfo)
 	return nil
 }
 

pkg/performanceprofile/profilecreator/cmd/root.go

Lines changed: 6 additions & 8 deletions
@@ -22,7 +22,6 @@ import (
 	"os"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 
@@ -105,11 +104,10 @@ type ProfileData struct {
 // ClusterData collects the cluster wide information, each mcp points to a list of ghw node handlers
 type ClusterData map[string][]*profilecreator.GHWHandler
 
+// shortcut
+var Alert = profilecreator.Alert
+
 func init() {
-	log.SetOutput(os.Stderr)
-	log.SetFormatter(&log.TextFormatter{
-		DisableTimestamp: true,
-	})
 	utilruntime.Must(performancev2.AddToScheme(scheme))
 }
 
@@ -200,7 +198,7 @@ func makeNodesHandlers(mustGatherDirPath, poolName string, nodes []*corev1.Node)
 		sb.WriteString(node.Name + " ")
 	}
 	// NodePoolName is alias of MCPName
-	log.Infof("Nodes names targeted by %s pool are: %s", poolName, sb.String())
+	Alert("Nodes names targeted by %s pool are: %s", poolName, sb.String())
 	return handlers, nil
 }
 
@@ -310,8 +308,8 @@ func makeProfileDataFrom(nodeHandler *profilecreator.GHWHandler, args *ProfileCr
 	if err != nil {
 		return nil, fmt.Errorf("failed to compute the reserved and isolated CPUs: %v", err)
 	}
-	log.Infof("%d reserved CPUs allocated: %v ", reservedCPUs.Size(), reservedCPUs.String())
-	log.Infof("%d isolated CPUs allocated: %v", isolatedCPUs.Size(), isolatedCPUs.String())
+	Alert("%d reserved CPUs allocated: %v ", reservedCPUs.Size(), reservedCPUs.String())
+	Alert("%d isolated CPUs allocated: %v", isolatedCPUs.Size(), isolatedCPUs.String())
 	kernelArgs := profilecreator.GetAdditionalKernelArgs(args.DisableHT)
 	profileData := &ProfileData{
 		reservedCPUs: reservedCPUs.String(),

pkg/performanceprofile/profilecreator/mcp.go

Lines changed: 2 additions & 4 deletions
@@ -4,8 +4,6 @@ import (
 	"fmt"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
-
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -77,7 +75,7 @@ func GetNodesForPool(pool *mcfgv1.MachineConfigPool, clusterPools []*mcfgv1.Mach
 	for _, n := range clusterNodes {
 		p, err := getPrimaryPoolForNode(n, clusterPools)
 		if err != nil {
-			log.Warningf("can't get pool for node %q: %v", n.Name, err)
+			Alert("can't get pool for node %q: %v", n.Name, err)
 			continue
 		}
 		if p == nil {
@@ -121,7 +119,7 @@ func getPoolsForNode(node *corev1.Node, clusterPools []*mcfgv1.MachineConfigPool
 	if isWindows(node) {
 		// This is not an error: this is a Windows Node and it won't be managed by MCO. We're explicitly logging
 		// here at a high level to disambiguate this from the other pools = nil scenario
-		log.Infof("Node %v is a windows node so won't be managed by MCO", node.Name)
+		Alert("Node %v is a windows node so won't be managed by MCO", node.Name)
 		return nil, nil
 	}
 

pkg/performanceprofile/profilecreator/mustgather.go

Lines changed: 1 addition & 2 deletions
@@ -24,7 +24,6 @@ import (
 	"strings"
 
 	machineconfigv1 "github.com/openshift/api/machineconfiguration/v1"
-	log "github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	k8syaml "k8s.io/apimachinery/pkg/util/yaml"
 )
@@ -68,7 +67,7 @@ func getMustGatherFullPathsWithFilter(mustGatherPath string, suffix string, filt
 		return "", fmt.Errorf("no match for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
 	}
 	if len(paths) > 1 {
-		log.Warnf("Multiple matches for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
+		Alert("Multiple matches for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
 		return "", fmt.Errorf("Multiple matches for the specified must gather directory path: %s and suffix: %s.\n Expected only one performance-addon-operator-must-gather* directory, please check the must-gather tarball", mustGatherPath, suffix)
 	}
 	// returning one possible path

pkg/performanceprofile/profilecreator/profilecreator.go

Lines changed: 9 additions & 10 deletions
@@ -25,7 +25,6 @@ import (
 
 	"github.com/jaypipes/ghw/pkg/cpu"
 	"github.com/jaypipes/ghw/pkg/topology"
-	log "github.com/sirupsen/logrus"
 
 	configv1 "github.com/openshift/api/config/v1"
 	k8syaml "k8s.io/apimachinery/pkg/util/yaml"
@@ -239,7 +238,7 @@ func getOfflinedCPUs(extCpuInfo *extendedCPUInfo, offlinedCPUCount int, disableH
 	}
 
 	if lpOfflined < offlinedCPUCount {
-		log.Warnf("could not offline enough logical processors (required:%d, offlined:%d)", offlinedCPUCount, lpOfflined)
+		Alert("could not offline enough logical processors (required:%d, offlined:%d)", offlinedCPUCount, lpOfflined)
 	}
 	return offlined.Result(), nil
 }
@@ -248,29 +247,29 @@ func updateTopologyInfo(topoInfo *topology.Info, disableHTFlag bool, htEnabled b
 	//currently HT is enabled on the system and the user wants to disable HT
 
 	if htEnabled && disableHTFlag {
-		log.Infof("Updating Topology info because currently hyperthreading is enabled and the performance profile will disable it")
+		Alert("Updating Topology info because currently hyperthreading is enabled and the performance profile will disable it")
 		return topologyHTDisabled(topoInfo), nil
 	}
 	return topoInfo, nil
 }
 
 func getReservedCPUs(topologyInfo *topology.Info, reservedCPUCount int, splitReservedCPUsAcrossNUMA bool, disableHTFlag bool, htEnabled bool) (cpuset.CPUSet, error) {
 	if htEnabled && disableHTFlag {
-		log.Infof("Currently hyperthreading is enabled and the performance profile will disable it")
+		Alert("Currently hyperthreading is enabled and the performance profile will disable it")
 		htEnabled = false
 	}
-	log.Infof("NUMA cell(s): %d", len(topologyInfo.Nodes))
+	Alert("NUMA cell(s): %d", len(topologyInfo.Nodes))
 	totalCPUs := 0
 	for id, node := range topologyInfo.Nodes {
 		coreList := []int{}
 		for _, core := range node.Cores {
 			coreList = append(coreList, core.LogicalProcessors...)
 		}
-		log.Infof("NUMA cell %d : %v", id, coreList)
+		Alert("NUMA cell %d : %v", id, coreList)
 		totalCPUs += len(coreList)
 	}
 
-	log.Infof("CPU(s): %d", totalCPUs)
+	Alert("CPU(s): %d", totalCPUs)
 
 	if splitReservedCPUsAcrossNUMA {
 		res, err := getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfo.Nodes)
@@ -351,7 +350,7 @@ func getCPUsSplitAcrossNUMA(reservedCPUCount int, htEnabled bool, topologyInfoNo
 	reservedPerNuma := reservedCPUCount / numaNodeNum
 	remainder := reservedCPUCount % numaNodeNum
 	if remainder != 0 {
-		log.Warnf("The reserved CPUs cannot be split equally across NUMA Nodes")
+		Alert("The reserved CPUs cannot be split equally across NUMA Nodes")
 	}
 	for numaID, node := range topologyInfoNodes {
 		if remainder != 0 {
@@ -467,7 +466,7 @@ func ensureSameTopology(topology1, topology2 *topology.Info, tols toleration.Set
 				// as the NUMA cells have same logical processors' count and IDs and same threads' number,
 				// core ID equality is treated as best effort. That is because when scheduling workloads,
 				// we care about the logical processors ids and their location on the NUMAs.
-				log.Warnf("the CPU core ids in NUMA node %d differ: %d vs %d", node1.ID, core1.ID, cores2[j].ID)
+				Alert("the CPU core ids in NUMA node %d differ: %d vs %d", node1.ID, core1.ID, cores2[j].ID)
 				tols[toleration.DifferentCoreIDs] = true
 			}
 			if core1.NumThreads != cores2[j].NumThreads {
@@ -488,7 +487,7 @@ func GetAdditionalKernelArgs(disableHT bool) []string {
 		kernelArgs = append(kernelArgs, noSMTKernelArg)
 	}
 	sort.Strings(kernelArgs)
-	log.Infof("Additional Kernel Args based on configuration: %v", kernelArgs)
+	Alert("Additional Kernel Args based on configuration: %v", kernelArgs)
 	return kernelArgs
 }
 
