|
| 1 | +/* |
| 2 | + * |
| 3 | + * Copyright 2023 gRPC authors. |
| 4 | + * |
| 5 | + * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | + * you may not use this file except in compliance with the License. |
| 7 | + * You may obtain a copy of the License at |
| 8 | + * |
| 9 | + * http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | + * |
| 11 | + * Unless required by applicable law or agreed to in writing, software |
| 12 | + * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | + * See the License for the specific language governing permissions and |
| 15 | + * limitations under the License. |
| 16 | + * |
| 17 | + */ |
| 18 | + |
| 19 | +package interop |
| 20 | + |
| 21 | +import ( |
| 22 | + "context" |
| 23 | + "fmt" |
| 24 | + "sync" |
| 25 | + "time" |
| 26 | + |
| 27 | + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" |
| 28 | + "google.golang.org/grpc/balancer" |
| 29 | + "google.golang.org/grpc/balancer/base" |
| 30 | + "google.golang.org/grpc/connectivity" |
| 31 | + "google.golang.org/grpc/orca" |
| 32 | +) |
| 33 | + |
// init registers the test LB policy with gRPC so interop tests can select it
// by its name ("test_backend_metrics_load_balancer") via service config.
func init() {
	balancer.Register(orcabb{})
}
| 37 | + |
| 38 | +type orcabb struct{} |
| 39 | + |
| 40 | +func (orcabb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { |
| 41 | + return &orcab{cc: cc} |
| 42 | +} |
| 43 | + |
// Name returns the name under which this test LB policy is registered.
func (orcabb) Name() string {
	return "test_backend_metrics_load_balancer"
}
| 47 | + |
// orcab is a test LB policy that creates a single SubConn covering all
// resolved addresses and records ORCA backend-metric load reports (both
// per-call and out-of-band) for the interop client to inspect.
type orcab struct {
	cc          balancer.ClientConn
	sc          balancer.SubConn // the single SubConn; nil until created
	cancelWatch func()           // stops the OOB ORCA listener; nil until registered

	reportMu sync.Mutex
	report   *v3orcapb.OrcaLoadReport // most recent OOB load report, guarded by reportMu
}
| 56 | + |
| 57 | +func (o *orcab) UpdateClientConnState(s balancer.ClientConnState) error { |
| 58 | + if o.sc != nil { |
| 59 | + o.sc.UpdateAddresses(s.ResolverState.Addresses) |
| 60 | + return nil |
| 61 | + } |
| 62 | + |
| 63 | + if len(s.ResolverState.Addresses) == 0 { |
| 64 | + o.ResolverError(fmt.Errorf("produced no addresses")) |
| 65 | + return fmt.Errorf("resolver produced no addresses") |
| 66 | + } |
| 67 | + var err error |
| 68 | + o.sc, err = o.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{}) |
| 69 | + if err != nil { |
| 70 | + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("error creating subconn: %v", err))}) |
| 71 | + return nil |
| 72 | + } |
| 73 | + o.cancelWatch = orca.RegisterOOBListener(o.sc, o, orca.OOBListenerOptions{ReportInterval: time.Second}) |
| 74 | + o.sc.Connect() |
| 75 | + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) |
| 76 | + return nil |
| 77 | +} |
| 78 | + |
| 79 | +func (o *orcab) ResolverError(err error) { |
| 80 | + if o.sc == nil { |
| 81 | + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("resolver error: %v", err))}) |
| 82 | + } |
| 83 | +} |
| 84 | + |
| 85 | +func (o *orcab) UpdateSubConnState(sc balancer.SubConn, scState balancer.SubConnState) { |
| 86 | + if o.sc != sc { |
| 87 | + logger.Errorf("received subconn update for unknown subconn: %v vs %v", o.sc, sc) |
| 88 | + return |
| 89 | + } |
| 90 | + switch scState.ConnectivityState { |
| 91 | + case connectivity.Ready: |
| 92 | + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &scPicker{sc: sc, o: o}}) |
| 93 | + case connectivity.TransientFailure: |
| 94 | + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("all subchannels in transient failure: %v", scState.ConnectionError))}) |
| 95 | + case connectivity.Connecting: |
| 96 | + // Ignore; picker already set to "connecting". |
| 97 | + case connectivity.Idle: |
| 98 | + sc.Connect() |
| 99 | + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) |
| 100 | + case connectivity.Shutdown: |
| 101 | + // Ignore; we are closing but handle that in Close instead. |
| 102 | + } |
| 103 | +} |
| 104 | + |
| 105 | +func (o *orcab) Close() { |
| 106 | + o.cancelWatch() |
| 107 | +} |
| 108 | + |
| 109 | +func (o *orcab) OnLoadReport(r *v3orcapb.OrcaLoadReport) { |
| 110 | + o.reportMu.Lock() |
| 111 | + defer o.reportMu.Unlock() |
| 112 | + logger.Infof("received OOB load report: %v", r) |
| 113 | + o.report = r |
| 114 | +} |
| 115 | + |
// scPicker always picks the balancer's single SubConn, attaching a Done
// callback that propagates load reports into the RPC's context.
type scPicker struct {
	sc balancer.SubConn
	o  *orcab
}
| 120 | + |
| 121 | +func (p *scPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { |
| 122 | + doneCB := func(di balancer.DoneInfo) { |
| 123 | + if lr, _ := di.ServerLoad.(*v3orcapb.OrcaLoadReport); lr != nil && |
| 124 | + (lr.CpuUtilization != 0 || lr.MemUtilization != 0 || len(lr.Utilization) > 0 || len(lr.RequestCost) > 0) { |
| 125 | + // Since all RPCs will respond with a load report due to the |
| 126 | + // presence of the DialOption, we need to inspect every field and |
| 127 | + // use the out-of-band report instead if all are unset/zero. |
| 128 | + setContextCMR(info.Ctx, lr) |
| 129 | + } else { |
| 130 | + p.o.reportMu.Lock() |
| 131 | + defer p.o.reportMu.Unlock() |
| 132 | + if lr := p.o.report; lr != nil { |
| 133 | + setContextCMR(info.Ctx, lr) |
| 134 | + } |
| 135 | + } |
| 136 | + } |
| 137 | + return balancer.PickResult{SubConn: p.sc, Done: doneCB}, nil |
| 138 | +} |
| 139 | + |
| 140 | +func setContextCMR(ctx context.Context, lr *v3orcapb.OrcaLoadReport) { |
| 141 | + if r := orcaResultFromContext(ctx); r != nil { |
| 142 | + *r = lr |
| 143 | + } |
| 144 | +} |
| 145 | + |
// orcaKey is an unexported context-key type to avoid collisions with keys
// defined by other packages.
type orcaKey string

// orcaCtxKey is the context key under which contextWithORCAResult stores the
// destination pointer for the call's ORCA load report.
var orcaCtxKey = orcaKey("orcaResult")
| 149 | + |
| 150 | +// contextWithORCAResult sets a key in ctx with a pointer to an ORCA load |
| 151 | +// report that is to be filled in by the "test_backend_metrics_load_balancer" |
| 152 | +// LB policy's Picker's Done callback. |
| 153 | +// |
| 154 | +// If a per-call load report is provided from the server for the call, result |
| 155 | +// will be filled with that, otherwise the most recent OOB load report is used. |
| 156 | +// If no OOB report has been received, result is not modified. |
| 157 | +func contextWithORCAResult(ctx context.Context, result **v3orcapb.OrcaLoadReport) context.Context { |
| 158 | + return context.WithValue(ctx, orcaCtxKey, result) |
| 159 | +} |
| 160 | + |
| 161 | +// orcaResultFromContext returns the ORCA load report stored in the context. |
| 162 | +// The LB policy uses this to communicate the load report back to the interop |
| 163 | +// client application. |
| 164 | +func orcaResultFromContext(ctx context.Context) **v3orcapb.OrcaLoadReport { |
| 165 | + v := ctx.Value(orcaCtxKey) |
| 166 | + if v == nil { |
| 167 | + return nil |
| 168 | + } |
| 169 | + return v.(**v3orcapb.OrcaLoadReport) |
| 170 | +} |
0 commit comments