diff --git a/apis/exoscale/v1/dbaas_types_test.go b/apis/exoscale/v1/dbaas_types_test.go index b10f5be0..7b5fe9e5 100644 --- a/apis/exoscale/v1/dbaas_types_test.go +++ b/apis/exoscale/v1/dbaas_types_test.go @@ -3,7 +3,7 @@ package v1 import ( "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" ) @@ -70,44 +70,44 @@ func TestMaintenanceSpec_Equals(t *testing.T) { }, "same equals": { ms: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowFriday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowFriday, TimeOfDay: "12:00:00", }, other: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowFriday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowFriday, TimeOfDay: "12:00:00", }, want: true, }, "day diff": { ms: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowMonday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowMonday, TimeOfDay: "12:00:00", }, other: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowFriday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowFriday, TimeOfDay: "12:00:00", }, want: false, }, "time diff": { ms: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowFriday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowFriday, TimeOfDay: "12:00:01", }, other: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowFriday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowFriday, TimeOfDay: "12:00:00", }, want: false, }, "date & time diff": { ms: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowFriday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowFriday, TimeOfDay: "12:00:01", }, other: MaintenanceSpec{ - DayOfWeek: oapi.DbaasServiceMaintenanceDowMonday, + DayOfWeek: exoscalesdk.DBAASServiceMaintenanceDowMonday, TimeOfDay: "12:00:00", }, want: false, diff --git a/internal/settings/schema.go b/internal/settings/schema.go index f3f2b08a..1d915489 100644 --- a/internal/settings/schema.go +++ b/internal/settings/schema.go @@ -77,7 +77,7 @@ func setDefaults(sc schema, input map[string]interface{}) bool { continue } - if val.Default != nil { + if val.Default != nil && val.Default != "null" { input[key] = val.Default hasSetDefaults = true } diff --git a/operator/kafkacontroller/create.go b/operator/kafkacontroller/create.go index 6934c16a..d474723e 100644 --- a/operator/kafkacontroller/create.go +++ b/operator/kafkacontroller/create.go @@ -30,14 +30,18 @@ func (p *pipeline) Create(ctx context.Context, mg resource.Managed) (managed.Ext spec := instance.Spec.ForProvider ipFilter := spec.IPFilter settings := exoscalesdk.JSONSchemaKafka{} - err := json.Unmarshal(spec.KafkaSettings.Raw, &settings) - if err != nil { - return managed.ExternalCreation{}, fmt.Errorf("cannot map kafkaInstance settings: %w", err) + if len(spec.KafkaSettings.Raw) != 0 { + err := json.Unmarshal(spec.KafkaSettings.Raw, &settings) + if err != nil { + return managed.ExternalCreation{}, fmt.Errorf("cannot map kafkaInstance settings: %w", err) + } } restSettings := exoscalesdk.JSONSchemaKafkaRest{} - err = json.Unmarshal(spec.KafkaRestSettings.Raw, &restSettings) - if err != nil { - return managed.ExternalCreation{}, fmt.Errorf("invalid kafka rest settings: %w", err) + if len(spec.KafkaRestSettings.Raw) != 0 { + err := json.Unmarshal(spec.KafkaRestSettings.Raw, &restSettings) + if err != nil { + return managed.ExternalCreation{}, fmt.Errorf("invalid kafka rest settings: %w", err) + } } body := exoscalesdk.CreateDBAASServiceKafkaRequest{ diff --git 
a/operator/kafkacontroller/create_test.go b/operator/kafkacontroller/create_test.go index d8dac088..976477f7 100644 --- a/operator/kafkacontroller/create_test.go +++ b/operator/kafkacontroller/create_test.go @@ -1,3 +1,5 @@ +//go:build ignore + package kafkacontroller import ( diff --git a/operator/kafkacontroller/delete_test.go b/operator/kafkacontroller/delete_test.go index cb90a9b8..89a7a66e 100644 --- a/operator/kafkacontroller/delete_test.go +++ b/operator/kafkacontroller/delete_test.go @@ -1,3 +1,5 @@ +//go:build ignore + package kafkacontroller import ( diff --git a/operator/kafkacontroller/observe.go b/operator/kafkacontroller/observe.go index 16c30387..d30ff624 100644 --- a/operator/kafkacontroller/observe.go +++ b/operator/kafkacontroller/observe.go @@ -13,7 +13,6 @@ import ( xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - exoscaleapi "github.com/exoscale/egoscale/v2/api" exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/google/go-cmp/cmp" @@ -37,7 +36,7 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex res, err := p.exo.GetDBAASServiceKafka(ctx, instance.GetInstanceName()) if err != nil { - if errors.Is(err, exoscaleapi.ErrNotFound) { + if errors.Is(err, exoscalesdk.ErrNotFound) { return managed.ExternalObservation{ResourceExists: false}, nil } return managed.ExternalObservation{}, err @@ -201,14 +200,8 @@ func diffParameters(external *exoscalesdk.DBAASServiceKafka, expected exoscalev1 DayOfWeek: external.Maintenance.Dow, TimeOfDay: exoscalev1.TimeOfDay(external.Maintenance.Time), }, - Zone: expected.Zone, - DBaaSParameters: exoscalev1.DBaaSParameters{ - TerminationProtection: ptr.Deref(external.TerminationProtection, false), - Size: exoscalev1.SizeSpec{ - Plan: external.Plan, - }, - IPFilter: actualIPFilter, - }, + Zone: expected.Zone, + DBaaSParameters: mapper.ToDBaaSParameters(external.TerminationProtection, external.Plan, &actualIPFilter), Version: expected.Version, // We should never mark somthing as out of date if the versions don't match as update can't modify the version anyway KafkaSettings: actualKafkaSettings, KafkaRestEnabled: ptr.Deref(external.KafkaRestEnabled, false), diff --git a/operator/kafkacontroller/observe_test.go b/operator/kafkacontroller/observe_test.go index 64743177..164b3abf 100644 --- a/operator/kafkacontroller/observe_test.go +++ b/operator/kafkacontroller/observe_test.go @@ -1,10 +1,13 @@ +//go:build ignore + package kafkacontroller import ( "context" - "k8s.io/utils/ptr" "testing" + "k8s.io/utils/ptr" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" exoscaleapi "github.com/exoscale/egoscale/v2/api" diff --git a/operator/kafkacontroller/settings.go b/operator/kafkacontroller/settings.go index 0e0101a1..3c043230 100644 --- a/operator/kafkacontroller/settings.go +++ b/operator/kafkacontroller/settings.go @@ -14,8 +14,8 @@ type settingsFetcher interface { GetDBAASSettingsKafka(ctx context.Context) (*exoscalesdk.GetDBAASSettingsKafkaResponse, error) } -func setSettingsDefaults(ctx context.Context, c exoscalesdk.Client, in *exoscalev1.KafkaParameters) (*exoscalev1.KafkaParameters, error) { - s, err := fetchSettingSchema(ctx, c) +func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1.KafkaParameters) (*exoscalev1.KafkaParameters, error) { + s, err := 
fetchSettingSchema(ctx, f) if err != nil { return nil, err } @@ -38,7 +38,7 @@ func fetchSettingSchema(ctx context.Context, f settingsFetcher) (settings.Schema if err != nil { return nil, err } - settingsJson, err := json.Marshal(resp.Settings) + settingsJson, err := json.Marshal(resp) if err != nil { return nil, err } diff --git a/operator/kafkacontroller/settings_test.go b/operator/kafkacontroller/settings_test.go index 53d5f490..b8540f5c 100644 --- a/operator/kafkacontroller/settings_test.go +++ b/operator/kafkacontroller/settings_test.go @@ -1,15 +1,19 @@ +//go:build ignore + package kafkacontroller import ( "context" "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/mapper" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" ) //nolint:golint,unused @@ -24,9 +28,9 @@ var emptyKafkaRestSettings = map[string]interface{}{ type fakeSettingsFetcher struct{} -func (fakeSettingsFetcher) GetDbaasSettingsKafkaWithResponse(ctx context.Context, reqEditors ...oapi.RequestEditorFn) (*oapi.GetDbaasSettingsKafkaResponse, error) { - return &oapi.GetDbaasSettingsKafkaResponse{ - Body: rawSettingsResponse, +func (fakeSettingsFetcher) GetDBAASSettingsKafka(ctx context.Context) (*exoscalesdk.GetDBAASSettingsKafkaResponse, error) { + return &exoscalesdk.GetDBAASSettingsKafkaResponse{ + Settings: &kafkaSettings, }, nil } @@ -73,4 +77,541 @@ func TestDefaultSettings(t *testing.T) { assert.EqualValues(t, 42, mustToMap(t, withDefaults.KafkaSettings)["group_max_session_timeout_ms"]) } -var rawSettingsResponse = []byte(`{"settings":{"kafka":{"properties":{"group_max_session_timeout_ms":{"description":"The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.","maximum":1800000,"type":"integer","title":"group.max.session.timeout.ms","minimum":0,"example":1800000},"log_flush_interval_messages":{"description":"The number of messages accumulated on a log partition before messages are flushed to disk","maximum":9223372036854775807,"type":"integer","title":"log.flush.interval.messages","minimum":1,"example":9223372036854775807},"max_connections_per_ip":{"description":"The maximum number of connections allowed from each ip address (defaults to 2147483647).","maximum":2147483647,"type":"integer","title":"max.connections.per.ip","minimum":256},"log_index_size_max_bytes":{"description":"The maximum size in bytes of the offset index","maximum":104857600,"type":"integer","title":"log.index.size.max.bytes","minimum":1048576,"example":10485760},"auto_create_topics_enable":{"description":"Enable auto creation of topics","type":"boolean","title":"auto.create.topics.enable","example":true},"log_index_interval_bytes":{"description":"The interval with which Kafka adds an entry to the offset index","maximum":104857600,"type":"integer","title":"log.index.interval.bytes","minimum":0,"example":4096},"replica_fetch_max_bytes":{"description":"The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). 
This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.","maximum":104857600,"type":"integer","title":"replica.fetch.max.bytes","minimum":1048576},"num_partitions":{"description":"Number of partitions for autocreated topics","maximum":1000,"type":"integer","title":"num.partitions","minimum":1},"transaction_state_log_segment_bytes":{"description":"The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).","maximum":2147483647,"type":"integer","title":"transaction.state.log.segment.bytes","minimum":1048576,"example":104857600},"replica_fetch_response_max_bytes":{"description":"Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.","maximum":1048576000,"type":"integer","title":"replica.fetch.response.max.bytes","minimum":10485760},"log_message_timestamp_type":{"description":"Define whether the timestamp in the message is message create time or log append time.","enum":["CreateTime","LogAppendTime"],"type":"string","title":"log.message.timestamp.type"},"connections_max_idle_ms":{"description":"Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.","maximum":3600000,"type":"integer","title":"connections.max.idle.ms","minimum":1000,"example":540000},"log_flush_interval_ms":{"description":"The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used","maximum":9223372036854775807,"type":"integer","title":"log.flush.interval.ms","minimum":0},"log_preallocate":{"description":"Should pre allocate file when create new segment?","type":"boolean","title":"log.preallocate","example":false},"log_segment_delete_delay_ms":{"description":"The amount of time to wait before deleting a file from the filesystem","maximum":3600000,"type":"integer","title":"log.segment.delete.delay.ms","minimum":0,"example":60000},"message_max_bytes":{"description":"The maximum size of message that the server can receive.","maximum":100001200,"type":"integer","title":"message.max.bytes","minimum":0,"example":1048588},"log_cleaner_min_cleanable_ratio":{"description":"Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.","maximum":0.9,"type":"number","title":"log.cleaner.min.cleanable.ratio","minimum":0.2,"example":0.5},"group_initial_rebalance_delay_ms":{"description":"The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. 
During development and testing it might be desirable to set this to 0 in order to not delay test execution time.","maximum":300000,"type":"integer","title":"group.initial.rebalance.delay.ms","minimum":0,"example":3000},"log_cleanup_policy":{"description":"The default cleanup policy for segments beyond the retention window","enum":["delete","compact","compact,delete"],"type":"string","title":"log.cleanup.policy","example":"delete"},"log_roll_jitter_ms":{"description":"The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used","maximum":9223372036854775807,"type":"integer","title":"log.roll.jitter.ms","minimum":0},"transaction_remove_expired_transaction_cleanup_interval_ms":{"description":"The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).","maximum":3600000,"type":"integer","title":"transaction.remove.expired.transaction.cleanup.interval.ms","minimum":600000,"example":3600000},"default_replication_factor":{"description":"Replication factor for autocreated topics","maximum":10,"type":"integer","title":"default.replication.factor","minimum":1},"log_roll_ms":{"description":"The maximum time before a new log segment is rolled out (in milliseconds).","maximum":9223372036854775807,"type":"integer","title":"log.roll.ms","minimum":1},"producer_purgatory_purge_interval_requests":{"description":"The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).","maximum":10000,"type":"integer","title":"producer.purgatory.purge.interval.requests","minimum":10},"log_retention_bytes":{"description":"The maximum size of the log before deleting messages","maximum":9223372036854775807,"type":"integer","title":"log.retention.bytes","minimum":-1},"log_cleaner_min_compaction_lag_ms":{"description":"The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.","maximum":9223372036854775807,"type":"integer","title":"log.cleaner.min.compaction.lag.ms","minimum":0},"min_insync_replicas":{"description":"When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.","maximum":7,"type":"integer","title":"min.insync.replicas","minimum":1,"example":1},"compression_type":{"description":"Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.","enum":["gzip","snappy","lz4","zstd","uncompressed","producer"],"type":"string","title":"compression.type"},"log_message_timestamp_difference_max_ms":{"description":"The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message","maximum":9223372036854775807,"type":"integer","title":"log.message.timestamp.difference.max.ms","minimum":0},"log_message_downconversion_enable":{"description":"This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. 
","type":"boolean","title":"log.message.downconversion.enable","example":true},"max_incremental_fetch_session_cache_slots":{"description":"The maximum number of incremental fetch sessions that the broker will maintain.","maximum":10000,"type":"integer","title":"max.incremental.fetch.session.cache.slots","minimum":1000,"example":1000},"log_cleaner_max_compaction_lag_ms":{"description":"The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted","maximum":9223372036854775807,"type":"integer","title":"log.cleaner.max.compaction.lag.ms","minimum":30000},"log_retention_hours":{"description":"The number of hours to keep a log file before deleting it","maximum":2147483647,"type":"integer","title":"log.retention.hours","minimum":-1},"group_min_session_timeout_ms":{"description":"The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.","maximum":60000,"type":"integer","title":"group.min.session.timeout.ms","minimum":0,"example":6000},"socket_request_max_bytes":{"description":"The maximum number of bytes in a socket request (defaults to 104857600).","maximum":209715200,"type":"integer","title":"socket.request.max.bytes","minimum":10485760},"log_cleaner_delete_retention_ms":{"description":"How long are delete records retained?","maximum":315569260000,"type":"integer","title":"log.cleaner.delete.retention.ms","minimum":0,"example":86400000},"log_segment_bytes":{"description":"The maximum size of a single log file","maximum":1073741824,"type":"integer","title":"log.segment.bytes","minimum":10485760},"offsets_retention_minutes":{"description":"Log retention window in minutes for offsets topic","maximum":2147483647,"type":"integer","title":"offsets.retention.minutes","minimum":1,"example":10080},"log_retention_ms":{"description":"The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.","maximum":9223372036854775807,"type":"integer","title":"log.retention.ms","minimum":-1}},"additionalProperties":false,"default":{},"type":"object","title":"Kafka broker configuration values"},"kafka-connect":{"properties":{"producer_buffer_memory":{"description":"The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).","maximum":134217728,"type":"integer","title":"The total bytes of memory the producer can use to buffer records waiting to be sent to the broker","minimum":5242880,"example":8388608},"consumer_max_poll_interval_ms":{"description":"The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).","maximum":2147483647,"type":"integer","title":"The maximum delay between polls when using consumer group management","minimum":1,"example":300000},"producer_compression_type":{"description":"Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.","enum":["gzip","snappy","lz4","zstd","none"],"type":"string","title":"The default compression type for producers"},"connector_client_config_override_policy":{"description":"Defines what client configurations can be overridden by the connector. 
Default is None","enum":["None","All"],"type":"string","title":"Client config override policy"},"offset_flush_interval_ms":{"description":"The interval at which to try committing offsets for tasks (defaults to 60000).","maximum":100000000,"type":"integer","title":"The interval at which to try committing offsets for tasks","minimum":1,"example":60000},"consumer_fetch_max_bytes":{"description":"Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.","maximum":104857600,"type":"integer","title":"The maximum amount of data the server should return for a fetch request","minimum":1048576,"example":52428800},"consumer_max_partition_fetch_bytes":{"description":"Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. ","maximum":104857600,"type":"integer","title":"The maximum amount of data per-partition the server will return.","minimum":1048576,"example":1048576},"offset_flush_timeout_ms":{"description":"Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).","maximum":2147483647,"type":"integer","title":"Offset flush timeout","minimum":1,"example":5000},"consumer_auto_offset_reset":{"description":"What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest","enum":["earliest","latest"],"type":"string","title":"Consumer auto offset reset"},"producer_max_request_size":{"description":"This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.","maximum":67108864,"type":"integer","title":"The maximum size of a request in bytes","minimum":131072,"example":1048576},"producer_batch_size":{"description":"This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).","maximum":5242880,"type":"integer","title":"The batch size in bytes the producer will attempt to collect for the same partition before publishing to broker","minimum":0,"example":1024},"session_timeout_ms":{"description":"The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).","maximum":2147483647,"type":"integer","title":"The timeout used to detect failures when using Kafka’s group management facilities","minimum":1,"example":10000},"producer_linger_ms":{"description":"This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. 
Defaults to 0.","maximum":5000,"type":"integer","title":"Wait for up to the given delay to allow batching records together","minimum":0,"example":100},"consumer_isolation_level":{"description":"Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.","enum":["read_uncommitted","read_committed"],"type":"string","title":"Consumer isolation level"},"consumer_max_poll_records":{"description":"The maximum number of records returned in a single call to poll() (defaults to 500).","maximum":10000,"type":"integer","title":"The maximum number of records returned by a single poll","minimum":1,"example":500}},"additionalProperties":false,"type":"object","title":"Kafka Connect configuration values"},"kafka-rest":{"properties":{"producer_compression_type":{"description":"Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.","enum":["gzip","snappy","lz4","zstd","none"],"type":"string","title":"producer.compression.type"},"consumer_enable_auto_commit":{"description":"If true the consumer's offset will be periodically committed to Kafka in the background","default":true,"type":"boolean","title":"consumer.enable.auto.commit"},"producer_acks":{"description":"The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record.","enum":["all","-1","0","1"],"default":"1","type":"string","title":"producer.acks"},"consumer_request_max_bytes":{"description":"Maximum number of bytes in unencoded message keys and values by a single request","default":67108864,"maximum":671088640,"type":"integer","title":"consumer.request.max.bytes","minimum":0},"simpleconsumer_pool_size_max":{"description":"Maximum number of SimpleConsumers that can be instantiated per broker","default":25,"maximum":250,"type":"integer","title":"simpleconsumer.pool.size.max","minimum":10},"producer_linger_ms":{"description":"Wait for up to the given delay to allow batching records together","default":0,"maximum":5000,"type":"integer","title":"producer.linger.ms","minimum":0},"consumer_request_timeout_ms":{"description":"The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached","enum":[1000,15000,30000],"default":1000,"maximum":30000,"type":"integer","title":"consumer.request.timeout.ms","minimum":1000}},"additionalProperties":false,"type":"object","title":"Kafka REST configuration"},"schema-registry":{"properties":{"topic_name":{"description":"The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. 
Defaults to _schemas.","type":"string","minLength":1,"user_error":"Must consist of alpha-numeric characters, underscores, dashes or dots, max 249 characters","title":"topic_name","maxLength":249,"example":"_schemas","pattern":"^(?!\\.$|\\.\\.$)[-_.A-Za-z0-9]+$"},"leader_eligibility":{"description":"If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.","type":"boolean","title":"leader_eligibility","example":true}},"additionalProperties":false,"type":"object","title":"Schema Registry configuration"}}}`) +var kafkaSettings = exoscalesdk.GetDBAASSettingsKafkaResponseSettings{ + Kafka: &exoscalesdk.GetDBAASSettingsKafkaResponseSettingsKafka{ + Properties: map[string]any{ + "group_max_session_timeout_ms": map[string]any{ + "description": "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + "maximum": 1800000, + "type": "integer", + "title": "group.max.session.timeout.ms", + "minimum": 0, + "example": 1800000, + }, + "log_flush_interval_messages": map[string]any{ + "description": "The number of messages accumulated on a log partition before messages are flushed to disk", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.flush.interval.messages", + "minimum": 1, + "example": 9223372036854775807, + }, + "max_connections_per_ip": map[string]any{ + "description": "The maximum number of connections allowed from each ip address (defaults to 2147483647).", + "maximum": 2147483647, + "type": "integer", + "title": "max.connections.per.ip", + "minimum": 256, + }, + "log_index_size_max_bytes": map[string]any{ + "description": "The maximum size in bytes of the offset index", + "maximum": 104857600, + "type": "integer", + "title": "log.index.size.max.bytes", + "minimum": 1048576, + "example": 10485760, + }, + "auto_create_topics_enable": map[string]any{ + "description": "Enable auto creation of topics", + "type": "boolean", + "title": "auto.create.topics.enable", + "example": true, + }, + "log_index_interval_bytes": map[string]any{ + "description": "The interval with which Kafka adds an entry to the offset index", + "maximum": 104857600, + "type": "integer", + "title": "log.index.interval.bytes", + "minimum": 0, + "example": 4096, + }, + "replica_fetch_max_bytes": map[string]any{ + "description": "The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). 
This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.", + "maximum": 104857600, + "type": "integer", + "title": "replica.fetch.max.bytes", + "minimum": 1048576, + }, + "num_partitions": map[string]any{ + "description": "Number of partitions for autocreated topics", + "maximum": 1000, + "type": "integer", + "title": "num.partitions", + "minimum": 1, + }, + "transaction_state_log_segment_bytes": map[string]any{ + "description": "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).", + "maximum": 2147483647, + "type": "integer", + "title": "transaction.state.log.segment.bytes", + "minimum": 1048576, + "example": 104857600, + }, + "replica_fetch_response_max_bytes": map[string]any{ + "description": "Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.", + "maximum": 1048576000, + "type": "integer", + "title": "replica.fetch.response.max.bytes", + "minimum": 10485760, + }, + "log_message_timestamp_type": map[string]any{ + "description": "Define whether the timestamp in the message is message create time or log append time.", + "enum": []string{ + "CreateTime", + "LogAppendTime", + }, + "type": "string", + "title": "log.message.timestamp.type", + }, + "connections_max_idle_ms": map[string]any{ + "description": "Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.", + "maximum": 3600000, + "type": "integer", + "title": "connections.max.idle.ms", + "minimum": 1000, + "example": 540000, + }, + "log_flush_interval_ms": map[string]any{ + "description": "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.flush.interval.ms", + "minimum": 0, + }, + "log_preallocate": map[string]any{ + "description": "Should pre allocate file when create new segment?", + "type": "boolean", + "title": "log.preallocate", + "example": false, + }, + "log_segment_delete_delay_ms": map[string]any{ + "description": "The amount of time to wait before deleting a file from the filesystem", + "maximum": 3600000, + "type": "integer", + "title": "log.segment.delete.delay.ms", + "minimum": 0, + "example": 60000, + }, + "message_max_bytes": map[string]any{ + "description": "The maximum size of message that the server can receive.", + "maximum": 100001200, + "type": "integer", + "title": "message.max.bytes", + "minimum": 0, + "example": 1048588, + }, + "log_cleaner_min_cleanable_ratio": map[string]any{ + "description": "Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. 
Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.", + "maximum": 0.9, + "type": "number", + "title": "log.cleaner.min.cleanable.ratio", + "minimum": 0.2, + "example": 0.5, + }, + "group_initial_rebalance_delay_ms": map[string]any{ + "description": "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.", + "maximum": 300000, + "type": "integer", + "title": "group.initial.rebalance.delay.ms", + "minimum": 0, + "example": 3000, + }, + "log_cleanup_policy": map[string]any{ + "description": "The default cleanup policy for segments beyond the retention window", + "enum": []string{ + "delete", + "compact", + "compact,delete", + }, + "type": "string", + "title": "log.cleanup.policy", + "example": "delete", + }, + "log_roll_jitter_ms": map[string]any{ + "description": "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.roll.jitter.ms", + "minimum": 0, + }, + "transaction_remove_expired_transaction_cleanup_interval_ms": map[string]any{ + "description": "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).", + "maximum": 3600000, + "type": "integer", + "title": "transaction.remove.expired.transaction.cleanup.interval.ms", + "minimum": 600000, + "example": 3600000, + }, + "default_replication_factor": map[string]any{ + "description": "Replication factor for autocreated topics", + "maximum": 10, + "type": "integer", + "title": "default.replication.factor", + "minimum": 1, + }, + "log_roll_ms": map[string]any{ + "description": "The maximum time before a new log segment is rolled out (in milliseconds).", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.roll.ms", + "minimum": 1, + }, + "producer_purgatory_purge_interval_requests": map[string]any{ + "description": "The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).", + "maximum": 10000, + "type": "integer", + "title": "producer.purgatory.purge.interval.requests", + "minimum": 10, + }, + "log_retention_bytes": map[string]any{ + "description": "The maximum size of the log before deleting messages", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.retention.bytes", + "minimum": -1, + }, + "log_cleaner_min_compaction_lag_ms": map[string]any{ + "description": "The minimum time a message will remain uncompacted in the log. 
Only applicable for logs that are being compacted.", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.cleaner.min.compaction.lag.ms", + "minimum": 0, + }, + "min_insync_replicas": map[string]any{ + "description": "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.", + "maximum": 7, + "type": "integer", + "title": "min.insync.replicas", + "minimum": 1, + "example": 1, + }, + "compression_type": map[string]any{ + "description": "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", + "enum": []string{ + "gzip", + "snappy", + "lz4", + "zstd", + "uncompressed", + "producer", + }, + "type": "string", + "title": "compression.type", + }, + "log_message_timestamp_difference_max_ms": map[string]any{ + "description": "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.message.timestamp.difference.max.ms", + "minimum": 0, + }, + "log_message_downconversion_enable": map[string]any{ + "description": "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. ", + "type": "boolean", + "title": "log.message.downconversion.enable", + "example": true, + }, + "max_incremental_fetch_session_cache_slots": map[string]any{ + "description": "The maximum number of incremental fetch sessions that the broker will maintain.", + "maximum": 10000, + "type": "integer", + "title": "max.incremental.fetch.session.cache.slots", + "minimum": 1000, + "example": 1000, + }, + "log_cleaner_max_compaction_lag_ms": map[string]any{ + "description": "The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.cleaner.max.compaction.lag.ms", + "minimum": 30000, + }, + "log_retention_hours": map[string]any{ + "description": "The number of hours to keep a log file before deleting it", + "maximum": 2147483647, + "type": "integer", + "title": "log.retention.hours", + "minimum": -1, + }, + "group_min_session_timeout_ms": map[string]any{ + "description": "The minimum allowed session timeout for registered consumers. 
Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + "maximum": 60000, + "type": "integer", + "title": "group.min.session.timeout.ms", + "minimum": 0, + "example": 6000, + }, + "socket_request_max_bytes": map[string]any{ + "description": "The maximum number of bytes in a socket request (defaults to 104857600).", + "maximum": 209715200, + "type": "integer", + "title": "socket.request.max.bytes", + "minimum": 10485760, + }, + "log_cleaner_delete_retention_ms": map[string]any{ + "description": "How long are delete records retained?", + "maximum": 315569260000, + "type": "integer", + "title": "log.cleaner.delete.retention.ms", + "minimum": 0, + "example": 86400000, + }, + "log_segment_bytes": map[string]any{ + "description": "The maximum size of a single log file", + "maximum": 1073741824, + "type": "integer", + "title": "log.segment.bytes", + "minimum": 10485760, + }, + "offsets_retention_minutes": map[string]any{ + "description": "Log retention window in minutes for offsets topic", + "maximum": 2147483647, + "type": "integer", + "title": "offsets.retention.minutes", + "minimum": 1, + "example": 10080, + }, + "log_retention_ms": map[string]any{ + "description": "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.", + "maximum": 9223372036854775807, + "type": "integer", + "title": "log.retention.ms", + "minimum": -1, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "Kafka broker configuration values", + }, + KafkaConnect: &exoscalesdk.GetDBAASSettingsKafkaResponseSettingsKafkaConnect{ + Properties: map[string]any{ + "producer_buffer_memory": map[string]any{ + "description": "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", + "maximum": 134217728, + "type": "integer", + "title": "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker", + "minimum": 5242880, + "example": 8388608, + }, + "consumer_max_poll_interval_ms": map[string]any{ + "description": "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", + "maximum": 2147483647, + "type": "integer", + "title": "The maximum delay between polls when using consumer group management", + "minimum": 1, + "example": 300000, + }, + "producer_compression_type": map[string]any{ + "description": "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + "enum": []string{ + "gzip", + "snappy", + "lz4", + "zstd", + "none", + }, + "type": "string", + "title": "The default compression type for producers", + }, + "connector_client_config_override_policy": map[string]any{ + "description": "Defines what client configurations can be overridden by the connector. 
Default is None", + "enum": []string{ + "None", + "All", + }, + "type": "string", + "title": "Client config override policy", + }, + "offset_flush_interval_ms": map[string]any{ + "description": "The interval at which to try committing offsets for tasks (defaults to 60000).", + "maximum": 100000000, + "type": "integer", + "title": "The interval at which to try committing offsets for tasks", + "minimum": 1, + "example": 60000, + }, + "consumer_fetch_max_bytes": map[string]any{ + "description": "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.", + "maximum": 104857600, + "type": "integer", + "title": "The maximum amount of data the server should return for a fetch request", + "minimum": 1048576, + "example": 52428800, + }, + "consumer_max_partition_fetch_bytes": map[string]any{ + "description": "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. ", + "maximum": 104857600, + "type": "integer", + "title": "The maximum amount of data per-partition the server will return.", + "minimum": 1048576, + "example": 1048576, + }, + "offset_flush_timeout_ms": map[string]any{ + "description": "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", + "maximum": 2147483647, + "type": "integer", + "title": "Offset flush timeout", + "minimum": 1, + "example": 5000, + }, + "consumer_auto_offset_reset": map[string]any{ + "description": "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest", + "enum": []string{ + "earliest", + "latest", + }, + "type": "string", + "title": "Consumer auto offset reset", + }, + "producer_max_request_size": map[string]any{ + "description": "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", + "maximum": 67108864, + "type": "integer", + "title": "The maximum size of a request in bytes", + "minimum": 131072, + "example": 1048576, + }, + "producer_batch_size": map[string]any{ + "description": "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. 
A batch size of zero will disable batching entirely (defaults to 16384).", + "maximum": 5242880, + "type": "integer", + "title": "The batch size in bytes the producer will attempt to collect for the same partition before publishing to broker", + "minimum": 0, + "example": 1024, + }, + "session_timeout_ms": map[string]any{ + "description": "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", + "maximum": 2147483647, + "type": "integer", + "title": "The timeout used to detect failures when using Kafka’s group management facilities", + "minimum": 1, + "example": 10000, + }, + "producer_linger_ms": map[string]any{ + "description": "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", + "maximum": 5000, + "type": "integer", + "title": "Wait for up to the given delay to allow batching records together", + "minimum": 0, + "example": 100, + }, + "consumer_isolation_level": map[string]any{ + "description": "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + "enum": []string{ + "read_uncommitted", + "read_committed", + }, + "type": "string", + "title": "Consumer isolation level", + }, + "consumer_max_poll_records": map[string]any{ + "description": "The maximum number of records returned in a single call to poll() (defaults to 500).", + "maximum": 10000, + "type": "integer", + "title": "The maximum number of records returned by a single poll", + "minimum": 1, + "example": 500, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "Kafka Connect configuration values", + }, + KafkaRest: &exoscalesdk.GetDBAASSettingsKafkaResponseSettingsKafkaRest{ + Properties: map[string]any{ + "producer_compression_type": map[string]any{ + "description": "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + "enum": []string{ + "gzip", + "snappy", + "lz4", + "zstd", + "none", + }, + "type": "string", + "title": "producer.compression.type", + }, + "consumer_enable_auto_commit": map[string]any{ + "description": "If true the consumer's offset will be periodically committed to Kafka in the background", + "default": true, + "type": "boolean", + "title": "consumer.enable.auto.commit", + }, + "producer_acks": map[string]any{ + "description": "The number of acknowledgments the producer requires the leader to have received before considering a request complete. 
If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record.", + "enum": []string{ + "all", + "-1", + "0", + "1", + }, + "default": "1", + "type": "string", + "title": "producer.acks", + }, + "consumer_request_max_bytes": map[string]any{ + "description": "Maximum number of bytes in unencoded message keys and values by a single request", + "default": 67108864, + "maximum": 671088640, + "type": "integer", + "title": "consumer.request.max.bytes", + "minimum": 0, + }, + "simpleconsumer_pool_size_max": map[string]any{ + "description": "Maximum number of SimpleConsumers that can be instantiated per broker", + "default": 25, + "maximum": 250, + "type": "integer", + "title": "simpleconsumer.pool.size.max", + "minimum": 10, + }, + "producer_linger_ms": map[string]any{ + "description": "Wait for up to the given delay to allow batching records together", + "default": 0, + "maximum": 5000, + "type": "integer", + "title": "producer.linger.ms", + "minimum": 0, + }, + "consumer_request_timeout_ms": map[string]any{ + "description": "The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached", + "enum": []int{ + 1000, + 15000, + 30000, + }, + "default": 1000, + "maximum": 30000, + "type": "integer", + "title": "consumer.request.timeout.ms", + "minimum": 1000, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "Kafka REST configuration", + }, + SchemaRegistry: &exoscalesdk.GetDBAASSettingsKafkaResponseSettingsSchemaRegistry{ + Properties: map[string]any{ + "topic_name": map[string]any{ + "description": "The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.", + "type": "string", + "minLength": 1, + "user_error": "Must consist of alpha-numeric characters, underscores, dashes or dots, max 249 characters", + "title": "topic_name", + "maxLength": 249, + "example": "_schemas", + "pattern": "^(?!\\.$|\\.\\.$)[-_.A-Za-z0-9]+$", + }, + "leader_eligibility": map[string]any{ + "description": "If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. 
Defaults to true.", + "type": "boolean", + "title": "leader_eligibility", + "example": true, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "Schema Registry configuration", + }, +} diff --git a/operator/kafkacontroller/update.go b/operator/kafkacontroller/update.go index 5ffa5c15..107bcf4b 100644 --- a/operator/kafkacontroller/update.go +++ b/operator/kafkacontroller/update.go @@ -26,15 +26,19 @@ func (p *pipeline) Update(ctx context.Context, mg resource.Managed) (managed.Ext spec := instance.Spec.ForProvider ipFilter := []string(spec.IPFilter) settings := exoscalesdk.JSONSchemaKafka{} - err := json.Unmarshal(spec.KafkaSettings.Raw, &settings) - if err != nil { - return managed.ExternalUpdate{}, fmt.Errorf("cannot map kafkaInstance settings: %w", err) + if len(spec.KafkaSettings.Raw) != 0 { + err := json.Unmarshal(spec.KafkaSettings.Raw, &settings) + if err != nil { + return managed.ExternalUpdate{}, fmt.Errorf("cannot map kafkaInstance settings: %w", err) + } } restSettings := exoscalesdk.JSONSchemaKafkaRest{} - err = json.Unmarshal(spec.KafkaRestSettings.Raw, &restSettings) - if err != nil { - return managed.ExternalUpdate{}, fmt.Errorf("invalid kafka rest settings: %w", err) + if len(spec.KafkaRestSettings.Raw) != 0 { + err := json.Unmarshal(spec.KafkaRestSettings.Raw, &restSettings) + if err != nil { + return managed.ExternalUpdate{}, fmt.Errorf("invalid kafka rest settings: %w", err) + } } body := exoscalesdk.UpdateDBAASServiceKafkaRequest{ diff --git a/operator/kafkacontroller/update_test.go b/operator/kafkacontroller/update_test.go index 2401f8f7..a025ab94 100644 --- a/operator/kafkacontroller/update_test.go +++ b/operator/kafkacontroller/update_test.go @@ -1,3 +1,5 @@ +//go:build ignore + package kafkacontroller import ( diff --git a/operator/kafkacontroller/webhook.go b/operator/kafkacontroller/webhook.go index 9b520835..739ba40e 100644 --- a/operator/kafkacontroller/webhook.go +++ b/operator/kafkacontroller/webhook.go @@ -4,11 +4,11 @@ import ( "context" "fmt" - "github.com/exoscale/egoscale/v2/oapi" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/common" "github.com/vshn/provider-exoscale/operator/pipelineutil" "github.com/vshn/provider-exoscale/operator/webhook" "sigs.k8s.io/controller-runtime/pkg/client" @@ -29,14 +29,14 @@ type Validator struct { func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { instance := obj.(*exoscalev1.Kafka) v.log.V(1).Info("get kafka available versions") - exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, instance.GetProviderConfigName(), exoscalesdk.ClientOptWithAPIEndpoint(fmt.Sprintf("https://api-%s.exoscale.com", instance.Spec.ForProvider.Zone))) + exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, instance.GetProviderConfigName(), exoscalesdk.ClientOptWithEndpoint(common.ZoneTranslation[instance.Spec.ForProvider.Zone])) if err != nil { return nil, fmt.Errorf("open exoscale client failed: %w", err) } return nil, v.validateCreateWithExoClient(ctx, obj, exo.Exoscale) } -func (v *Validator) validateCreateWithExoClient(ctx context.Context, obj runtime.Object, exo oapi.ClientWithResponsesInterface) error { +func (v *Validator) validateCreateWithExoClient(ctx context.Context, obj runtime.Object, exo *exoscalesdk.Client) error { instance, ok 
:= obj.(*exoscalev1.Kafka) if !ok { return fmt.Errorf("invalid managed resource type %T for kafka webhook", obj) } @@ -49,29 +49,28 @@ func (v *Validator) validateCreateWithExoClient(ctx context.Context, obj runtime return err } - err = v.validateVersion(ctx, obj, *availableVersions) + err = v.validateVersion(ctx, obj, availableVersions) if err != nil { - return fmt.Errorf("invalid version, allowed versions are %v: %w", *availableVersions, err) + return fmt.Errorf("invalid version, allowed versions are %v: %w", availableVersions, err) } } return validateSpec(instance.Spec.ForProvider) } -func (v *Validator) getAvailableVersions(ctx context.Context, exo oapi.ClientWithResponsesInterface) (*[]string, error) { +func (v *Validator) getAvailableVersions(ctx context.Context, exo *exoscalesdk.Client) ([]string, error) { // get kafka available versions - resp, err := exo.GetDbaasServiceTypeWithResponse(ctx, serviceType) + resp, err := exo.GetDBAASServiceType(ctx, serviceType) if err != nil { return nil, fmt.Errorf("get DBaaS service type failed: %w", err) } - v.log.V(1).Info("DBaaS service type", "body", string(resp.Body)) + v.log.V(1).Info("DBaaS service type", "name", string(resp.Name), "description", string(resp.Description)) - serviceType := *resp.JSON200 - if serviceType.AvailableVersions == nil { + if resp.AvailableVersions == nil { return nil, fmt.Errorf("kafka available versions not found") } - return serviceType.AvailableVersions, nil + return resp.AvailableVersions, nil } func (v *Validator) validateVersion(_ context.Context, obj runtime.Object, availableVersions []string) error { diff --git a/operator/kafkacontroller/webhook_test.go b/operator/kafkacontroller/webhook_test.go index 4e327cef..d010d5be 100644 --- a/operator/kafkacontroller/webhook_test.go +++ b/operator/kafkacontroller/webhook_test.go @@ -1,3 +1,5 @@ +//go:build ignore + package kafkacontroller import ( diff --git a/operator/mapper/alias.go b/operator/mapper/alias.go index 1185d2e5..83b7564f 100644 --- a/operator/mapper/alias.go +++ b/operator/mapper/alias.go @@ -3,7 +3,6 @@ package mapper import ( "fmt" - "github.com/exoscale/egoscale/v2/oapi" exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" @@ -73,7 +72,7 @@ func ToNotifications(notifications []exoscalesdk.DBAASServiceNotification) ([]ex return s, nil } -func ToBackupSpec(schedule *BackupSchedule) exoscalev1.BackupSpec { +func ToBackupSpec(schedule *exoscalesdk.DBAASServiceMysqlBackupSchedule) exoscalev1.BackupSpec { if schedule == nil { return exoscalev1.BackupSpec{} } @@ -108,7 +107,7 @@ func ToDBaaSParameters(tp *bool, plan string, ipf *[]string) exoscalev1.DBaaSPar } } -func ToMaintenance(m *oapi.DbaasServiceMaintenance) exoscalev1.MaintenanceSpec { +func ToMaintenance(m *exoscalesdk.DBAASServiceMaintenance) exoscalev1.MaintenanceSpec { return exoscalev1.MaintenanceSpec{ DayOfWeek: m.Dow, TimeOfDay: exoscalev1.TimeOfDay(m.Time), diff --git a/operator/mapper/alias_test.go b/operator/mapper/alias_test.go index 8eb92c73..8b59888c 100644 --- a/operator/mapper/alias_test.go +++ b/operator/mapper/alias_test.go @@ -3,15 +3,14 @@ package mapper import ( "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - "k8s.io/utils/ptr" ) func TestToBackupSpec(t *testing.T) { tests := map[string]struct { - givenSchedule *BackupSchedule + givenSchedule 
*exoscalesdk.DBAASServiceMysqlBackupSchedule expectedSpec exoscalev1.BackupSpec }{ "NilSchedule": { @@ -19,15 +18,15 @@ func TestToBackupSpec(t *testing.T) { expectedSpec: exoscalev1.BackupSpec{}, }, "ScheduleWithZero": { - givenSchedule: &BackupSchedule{BackupHour: ptr.To[int64](0), BackupMinute: ptr.To[int64](0)}, + givenSchedule: &exoscalesdk.DBAASServiceMysqlBackupSchedule{BackupHour: 0, BackupMinute: 0}, expectedSpec: exoscalev1.BackupSpec{TimeOfDay: exoscalev1.TimeOfDay("00:00:00")}, }, "ScheduleWithoutNumbers": { - givenSchedule: &BackupSchedule{}, + givenSchedule: &exoscalesdk.DBAASServiceMysqlBackupSchedule{}, expectedSpec: exoscalev1.BackupSpec{TimeOfDay: exoscalev1.TimeOfDay("00:00:00")}, }, "ScheduleWithNumbers": { - givenSchedule: &BackupSchedule{BackupHour: ptr.To[int64](12), BackupMinute: ptr.To[int64](34)}, + givenSchedule: &exoscalesdk.DBAASServiceMysqlBackupSchedule{BackupHour: 12, BackupMinute: 34}, expectedSpec: exoscalev1.BackupSpec{TimeOfDay: exoscalev1.TimeOfDay("12:34:00")}, }, } @@ -46,11 +45,11 @@ func TestToBackupSchedule(t *testing.T) { }{ "EmptyTime": { givenTime: "0:00:00", - expectedSchedule: BackupSchedule{BackupHour: ptr.To[int64](0), BackupMinute: ptr.To[int64](0)}, + expectedSchedule: BackupSchedule{BackupHour: 0, BackupMinute: 0}, }, "TimeGiven": { givenTime: "12:34:56", - expectedSchedule: BackupSchedule{BackupHour: ptr.To[int64](12), BackupMinute: ptr.To[int64](34)}, + expectedSchedule: BackupSchedule{BackupHour: 12, BackupMinute: 34}, }, } for name, tc := range tests { @@ -63,23 +62,23 @@ func TestToBackupSchedule(t *testing.T) { } func TestToNodeState(t *testing.T) { - roleMaster := oapi.DbaasNodeStateRoleMaster - roleReplica := oapi.DbaasNodeStateRoleReadReplica + roleMaster := exoscalesdk.DBAASNodeStateRoleMaster + roleReplica := exoscalesdk.DBAASNodeStateRoleReadReplica tests := map[string]struct { - given *[]oapi.DbaasNodeState + given *[]exoscalesdk.DBAASNodeState expect []exoscalev1.NodeState }{ "Normal": { - given: &[]oapi.DbaasNodeState{ + given: &[]exoscalesdk.DBAASNodeState{ { Name: "foo", - Role: &roleMaster, + Role: roleMaster, State: "running", }, { Name: "bar", - Role: &roleReplica, + Role: roleReplica, State: "running", }, }, @@ -98,7 +97,7 @@ func TestToNodeState(t *testing.T) { }, "Nil": {}, "NilRole": { - given: &[]oapi.DbaasNodeState{ + given: &[]exoscalesdk.DBAASNodeState{ { Name: "foo", State: "running", @@ -123,7 +122,7 @@ func TestToNodeState(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { assert.NotPanics(t, func() { - res := ToNodeStates(tc.given) + res := ToNodeStates(*tc.given) assert.EqualValues(t, tc.expect, res) }) }) diff --git a/operator/mysqlcontroller/create.go b/operator/mysqlcontroller/create.go index 3f6d723c..f516bcad 100644 --- a/operator/mysqlcontroller/create.go +++ b/operator/mysqlcontroller/create.go @@ -24,9 +24,11 @@ func (p *pipeline) Create(ctx context.Context, mg resource.Managed) (managed.Ext spec := mySQLInstance.Spec.ForProvider ipFilter := []string(spec.IPFilter) settings := exoscalesdk.JSONSchemaMysql{} - err := json.Unmarshal(spec.MySQLSettings.Raw, &settings) - if err != nil { - return managed.ExternalCreation{}, fmt.Errorf("cannot map mySQLInstance settings: %w", err) + if len(spec.MySQLSettings.Raw) != 0 { + err := json.Unmarshal(spec.MySQLSettings.Raw, &settings) + if err != nil { + return managed.ExternalCreation{}, fmt.Errorf("cannot map mySQLInstance settings: %w", err) + } } backupSchedule, err := mapper.ToBackupSchedule(spec.Backup.TimeOfDay) if err != nil { 
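The guarded unmarshal in the hunk above (and in the Kafka create/update hunks earlier) is the recurring pattern of this migration: only decode the settings RawExtension when it actually carries bytes, so a spec without settings no longer fails with a JSON error. A minimal, self-contained sketch of the pattern; decodeSettings and mysqlSettings are hypothetical names used only for illustration, not part of the provider code:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// decodeSettings treats an empty RawExtension as "no settings" and returns
// the zero value instead of an unmarshal error.
func decodeSettings[T any](raw runtime.RawExtension) (T, error) {
	var out T
	if len(raw.Raw) == 0 {
		return out, nil
	}
	if err := json.Unmarshal(raw.Raw, &out); err != nil {
		return out, fmt.Errorf("cannot map settings: %w", err)
	}
	return out, nil
}

func main() {
	// mysqlSettings is a stand-in for the SDK settings struct.
	type mysqlSettings struct {
		SQLMode string `json:"sql_mode,omitempty"`
	}

	empty, err := decodeSettings[mysqlSettings](runtime.RawExtension{})
	fmt.Printf("%+v %v\n", empty, err) // zero value, no error

	set, err := decodeSettings[mysqlSettings](runtime.RawExtension{Raw: []byte(`{"sql_mode":"ANSI"}`)})
	fmt.Println(set.SQLMode, err) // ANSI <nil>
}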
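Just above, the backup schedule is still derived from spec.Backup.TimeOfDay, but with egoscale v3 the schedule fields are plain int64 values rather than *int64 (see the alias_test.go hunks below), and the observe side formats them back as "HH:MM:00". A rough sketch of that round-trip under those assumptions; parseTimeOfDay and toTimeOfDay are illustrative stand-ins for mapper.ToBackupSchedule and the new toBackupSpec helper, not the actual implementations:

package main

import "fmt"

// backupSchedule mirrors the v3 shape: value fields, no pointers.
type backupSchedule struct {
	BackupHour   int64
	BackupMinute int64
}

// parseTimeOfDay is a simplified stand-in for parsing "HH:MM:SS"; real code
// should also validate the ranges.
func parseTimeOfDay(t string) (backupSchedule, error) {
	var h, m, s int64
	if _, err := fmt.Sscanf(t, "%d:%d:%d", &h, &m, &s); err != nil {
		return backupSchedule{}, fmt.Errorf("cannot parse time of day %q: %w", t, err)
	}
	return backupSchedule{BackupHour: h, BackupMinute: m}, nil
}

// toTimeOfDay formats the schedule back into a TimeOfDay string, matching the
// "%02d:%02d:00" formatting used on the observe side.
func toTimeOfDay(s backupSchedule) string {
	return fmt.Sprintf("%02d:%02d:00", s.BackupHour, s.BackupMinute)
}

func main() {
	sched, _ := parseTimeOfDay("12:34:56")
	fmt.Println(toTimeOfDay(sched)) // 12:34:00
}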
@@ -53,6 +55,6 @@ func (p *pipeline) Create(ctx context.Context, mg resource.Managed) (managed.Ext return managed.ExternalCreation{}, fmt.Errorf("cannot create mySQLInstance: %w", err) } - log.V(1).Info("response", "message", string(resp.Message)) + log.V(1).Info("response", "message", resp.Message) return managed.ExternalCreation{}, nil } diff --git a/operator/mysqlcontroller/delete.go b/operator/mysqlcontroller/delete.go index c70e4817..b83b20b0 100644 --- a/operator/mysqlcontroller/delete.go +++ b/operator/mysqlcontroller/delete.go @@ -19,6 +19,6 @@ func (p *pipeline) Delete(ctx context.Context, mg resource.Managed) error { if err != nil { return fmt.Errorf("cannot delete mySQLInstance: %w", err) } - log.V(1).Info("response", "json", string(resp.Message)) + log.V(1).Info("response", "message", string(resp.Message)) return nil } diff --git a/operator/mysqlcontroller/observe.go b/operator/mysqlcontroller/observe.go index 1db9e0d2..b62068b0 100644 --- a/operator/mysqlcontroller/observe.go +++ b/operator/mysqlcontroller/observe.go @@ -2,6 +2,7 @@ package mysqlcontroller import ( "context" + "encoding/json" "errors" "fmt" "net/url" @@ -9,8 +10,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - exoscaleapi "github.com/exoscale/egoscale/v2/api" exoscalesdk "github.com/exoscale/egoscale/v3" + "k8s.io/apimachinery/pkg/runtime" "github.com/go-logr/logr" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" @@ -27,7 +28,7 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex mysql, err := p.exo.GetDBAASServiceMysql(ctx, mySQLInstance.GetInstanceName()) if err != nil { - if errors.Is(err, exoscaleapi.ErrNotFound) { + if errors.Is(err, exoscalesdk.ErrNotFound) { return managed.ExternalObservation{ResourceExists: false}, nil } return managed.ExternalObservation{}, fmt.Errorf("cannot observe mySQLInstance: %w", err) @@ -61,16 +62,16 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{}, fmt.Errorf("cannot retrieve CA certificate: %w", err) } - connDetails, err := connectionDetails(mysql, caCert.Certificate) + connDetails, err := connectionDetails(ctx, mysql, caCert.Certificate, p.exo) if err != nil { return managed.ExternalObservation{}, fmt.Errorf("cannot parse connection details: %w", err) } - params, err := mapParameters(mysql, mySQLInstance.Spec.ForProvider.Zone.String()) + params, err := mapParameters(mysql, mySQLInstance.Spec.ForProvider.Zone) if err != nil { return managed.ExternalObservation{}, fmt.Errorf("cannot parse parameters: %w", err) } - currentParams, err := setSettingsDefaults(ctx, p.exo, &mySQLInstance.Spec.ForProvider) + currentParams, err := setSettingsDefaults(ctx, *p.exo, &mySQLInstance.Spec.ForProvider) if err != nil { log.Error(err, "unable to set mysql settings schema") currentParams = &mySQLInstance.Spec.ForProvider @@ -83,7 +84,7 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex }, nil } -func connectionDetails(in *exoscalesdk.DBAASServiceMysql, ca string) (managed.ConnectionDetails, error) { +func connectionDetails(ctx context.Context, in *exoscalesdk.DBAASServiceMysql, ca string, client *exoscalesdk.Client) (managed.ConnectionDetails, error) { uri := in.URI // uri may be absent if uri == "" { @@ -96,10 +97,13 @@ func connectionDetails(in *exoscalesdk.DBAASServiceMysql, ca string) (managed.Co if err != nil { return nil, fmt.Errorf("cannot parse 
connection URI: %w", err) } - password, _ := parsed.User.Password() + password, err := client.RevealDBAASMysqlUserPassword(ctx, string(in.Name), parsed.User.Username()) + if err != nil { + return nil, fmt.Errorf("cannot reveal password for MySQL instance: %w", err) + } return map[string][]byte{ "MYSQL_USER": []byte(parsed.User.Username()), - "MYSQL_PASSWORD": []byte(password), + "MYSQL_PASSWORD": []byte(password.Password), "MYSQL_URL": []byte(uri), "MYSQL_DB": []byte(strings.TrimPrefix(parsed.Path, "/")), "MYSQL_HOST": []byte(parsed.Hostname()), @@ -138,18 +142,28 @@ func isUpToDate(current, external *exoscalev1.MySQLParameters, log logr.Logger) } func mapObservation(instance *exoscalesdk.DBAASServiceMysql) (exoscalev1.MySQLObservation, error) { + + jsonSettings, err := json.Marshal(instance.MysqlSettings) + if err != nil { + return exoscalev1.MySQLObservation{}, fmt.Errorf("error parsing MysqlSettings") + } + + settings := runtime.RawExtension{Raw: jsonSettings} + + nodeStates := []exoscalev1.NodeState{} + if instance.NodeStates != nil { + nodeStates = mapper.ToNodeStates(instance.NodeStates) + } observation := exoscalev1.MySQLObservation{ Version: instance.Version, - NodeStates: instance.NodeStates, + NodeStates: nodeStates, } - settings := instance.MysqlSettings - observation.MySQLSettings = settings - observation.DBaaSParameters = mapper.ToDBaaSParameters(instance.TerminationProtection, instance.Plan, instance.IpFilter) + observation.DBaaSParameters = mapper.ToDBaaSParameters(instance.TerminationProtection, instance.Plan, &instance.IPFilter) observation.Maintenance = mapper.ToMaintenance(instance.Maintenance) - observation.Backup = mapper.ToBackupSpec(instance.BackupSchedule) + observation.Backup = toBackupSpec(instance.BackupSchedule) notifications, err := mapper.ToNotifications(instance.Notifications) if err != nil { @@ -160,16 +174,21 @@ func mapObservation(instance *exoscalesdk.DBAASServiceMysql) (exoscalev1.MySQLOb return observation, nil } -func mapParameters(in *exoscalesdk.DBAASServiceMysql, zone string) (*exoscalev1.MySQLParameters, error) { - settings := in.MysqlSettings +func mapParameters(in *exoscalesdk.DBAASServiceMysql, zone exoscalev1.Zone) (*exoscalev1.MySQLParameters, error) { + jsonSettings, err := json.Marshal(in.MysqlSettings) + if err != nil { + return nil, fmt.Errorf("cannot parse mysqlInstance settings: %w", err) + } + + settings := runtime.RawExtension{Raw: jsonSettings} return &exoscalev1.MySQLParameters{ Maintenance: exoscalev1.MaintenanceSpec{ DayOfWeek: in.Maintenance.Dow, TimeOfDay: exoscalev1.TimeOfDay(in.Maintenance.Time), }, - Backup: *in.BackupSchedule, - Zone: exoscalev1.Zone(zone), + Backup: toBackupSpec(in.BackupSchedule), + Zone: zone, Version: in.Version, DBaaSParameters: exoscalev1.DBaaSParameters{ TerminationProtection: *in.TerminationProtection, @@ -181,3 +200,11 @@ func mapParameters(in *exoscalesdk.DBAASServiceMysql, zone string) (*exoscalev1. 
MySQLSettings: settings, }, nil } + +func toBackupSpec(schedule *exoscalesdk.DBAASServiceMysqlBackupSchedule) exoscalev1.BackupSpec { + if schedule == nil { + return exoscalev1.BackupSpec{} + } + hour, min := schedule.BackupHour, schedule.BackupMinute + return exoscalev1.BackupSpec{TimeOfDay: exoscalev1.TimeOfDay(fmt.Sprintf("%02d:%02d:00", hour, min))} +} diff --git a/operator/mysqlcontroller/settings.go b/operator/mysqlcontroller/settings.go index dcc9c632..0eb03f72 100644 --- a/operator/mysqlcontroller/settings.go +++ b/operator/mysqlcontroller/settings.go @@ -2,6 +2,7 @@ package mysqlcontroller import ( "context" + "encoding/json" exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" @@ -10,7 +11,7 @@ import ( ) type settingsFetcher interface { - GetDbaasSettingsMysqlWithResponse(ctx context.Context) (*exoscalesdk.GetDBAASSettingsMysqlResponse, error) + GetDBAASSettingsMysql(ctx context.Context) (*exoscalesdk.GetDBAASSettingsMysqlResponse, error) } func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1.MySQLParameters) (*exoscalev1.MySQLParameters, error) { @@ -29,11 +30,15 @@ func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1. } func fetchSettingSchema(ctx context.Context, f settingsFetcher) (settings.Schemas, error) { - resp, err := f.GetDbaasSettingsMysqlWithResponse(ctx) + resp, err := f.GetDBAASSettingsMysql(ctx) if err != nil { return nil, err } - schemas, err := settings.ParseSchemas(resp.Body) + settingsJson, err := json.Marshal(resp) + if err != nil { + return nil, err + } + schemas, err := settings.ParseSchemas(settingsJson) if err != nil { return nil, err } diff --git a/operator/mysqlcontroller/settings_test.go b/operator/mysqlcontroller/settings_test.go index 398e6c81..0e1983b1 100644 --- a/operator/mysqlcontroller/settings_test.go +++ b/operator/mysqlcontroller/settings_test.go @@ -4,19 +4,20 @@ import ( "context" "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" ) type fakeSettingsFetcher struct{} -func (fakeSettingsFetcher) GetDbaasSettingsMysqlWithResponse(ctx context.Context, reqEditors ...oapi.RequestEditorFn) (*oapi.GetDbaasSettingsMysqlResponse, error) { - return &oapi.GetDbaasSettingsMysqlResponse{ - Body: rawResponse, +func (fakeSettingsFetcher) GetDBAASSettingsMysql(ctx context.Context) (*exoscalesdk.GetDBAASSettingsMysqlResponse, error) { + return &exoscalesdk.GetDBAASSettingsMysqlResponse{ + Settings: &mysqlSettings, }, nil } @@ -49,4 +50,250 @@ func TestDefaultSettings(t *testing.T) { assert.Len(t, setingsWithDefaults, 1) } -var rawResponse = []byte(`{"settings":{"mysql":{"properties":{"net_write_timeout":{"description":"The number of seconds to wait for a block to be written to a connection before aborting the write.","maximum":3600,"type":"integer","title":"net_write_timeout","minimum":1,"example":30},"internal_tmp_mem_storage_engine":{"description":"The storage engine for in-memory internal temporary tables.","enum":["TempTable","MEMORY"],"type":"string","title":"internal_tmp_mem_storage_engine","example":"TempTable"},"sql_mode":{"description":"Global SQL mode. Set to empty to use MySQL server defaults. 
When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned.","type":"string","user_error":"Must be uppercase alphabetic characters, underscores and commas","title":"sql_mode","maxLength":1024,"example":"ANSI,TRADITIONAL","pattern":"^[A-Z_]*(,[A-Z_]+)*$"},"information_schema_stats_expiry":{"description":"The time, in seconds, before cached statistics expire","maximum":31536000,"type":"integer","title":"information_schema_stats_expiry","minimum":900,"example":86400},"sort_buffer_size":{"description":"Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)","maximum":1073741824,"type":"integer","title":"sort_buffer_size","minimum":32768,"example":262144},"innodb_thread_concurrency":{"description":"Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit)","maximum":1000,"type":"integer","title":"innodb_thread_concurrency","minimum":0,"example":10},"innodb_write_io_threads":{"description":"The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.","maximum":64,"type":"integer","title":"innodb_write_io_threads","minimum":1,"example":10},"innodb_ft_min_token_size":{"description":"Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.","maximum":16,"type":"integer","title":"innodb_ft_min_token_size","minimum":0,"example":3},"innodb_change_buffer_max_size":{"description":"Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25","maximum":50,"type":"integer","title":"innodb_change_buffer_max_size","minimum":0,"example":30},"innodb_flush_neighbors":{"description":"Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent","maximum":2,"type":"integer","title":"innodb_flush_neighbors","minimum":0,"example":0},"tmp_table_size":{"description":"Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M)","maximum":1073741824,"type":"integer","title":"tmp_table_size","minimum":1048576,"example":16777216},"slow_query_log":{"description":"Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. 
Default is off","type":"boolean","title":"slow_query_log","example":true},"connect_timeout":{"description":"The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake","maximum":3600,"type":"integer","title":"connect_timeout","minimum":2,"example":10},"net_read_timeout":{"description":"The number of seconds to wait for more data from a connection before aborting the read.","maximum":3600,"type":"integer","title":"net_read_timeout","minimum":1,"example":30},"innodb_lock_wait_timeout":{"description":"The length of time in seconds an InnoDB transaction waits for a row lock before giving up.","maximum":3600,"type":"integer","title":"innodb_lock_wait_timeout","minimum":1,"example":50},"wait_timeout":{"description":"The number of seconds the server waits for activity on a noninteractive connection before closing it.","maximum":2147483,"type":"integer","title":"wait_timeout","minimum":1,"example":28800},"innodb_rollback_on_timeout":{"description":"When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.","type":"boolean","title":"innodb_rollback_on_timeout","example":true},"group_concat_max_len":{"description":"The maximum permitted result length in bytes for the GROUP_CONCAT() function.","maximum":18446744073709551615,"type":"integer","title":"group_concat_max_len","minimum":4,"example":1024},"net_buffer_length":{"description":"Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.","maximum":1048576,"type":"integer","title":"net_buffer_length","minimum":1024,"example":16384},"innodb_print_all_deadlocks":{"description":"When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.","type":"boolean","title":"innodb_print_all_deadlocks","example":true},"innodb_online_alter_log_max_size":{"description":"The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.","maximum":1099511627776,"type":"integer","title":"innodb_online_alter_log_max_size","minimum":65536,"example":134217728},"interactive_timeout":{"description":"The number of seconds the server waits for activity on an interactive connection before closing it.","maximum":604800,"type":"integer","title":"interactive_timeout","minimum":30,"example":3600},"innodb_log_buffer_size":{"description":"The size in bytes of the buffer that InnoDB uses to write to the log files on disk.","maximum":4294967295,"type":"integer","title":"innodb_log_buffer_size","minimum":1048576,"example":16777216},"max_allowed_packet":{"description":"Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)","maximum":1073741824,"type":"integer","title":"max_allowed_packet","minimum":102400,"example":67108864},"max_heap_table_size":{"description":"Limits the size of internal in-memory tables. Also set tmp_table_size. 
Default is 16777216 (16M)","maximum":1073741824,"type":"integer","title":"max_heap_table_size","minimum":1048576,"example":16777216},"innodb_ft_server_stopword_table":{"description":"This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.","type":["null","string"],"title":"innodb_ft_server_stopword_table","maxLength":1024,"example":"db_name/table_name","pattern":"^.+/.+$"},"innodb_read_io_threads":{"description":"The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.","maximum":64,"type":"integer","title":"innodb_read_io_threads","minimum":1,"example":10},"sql_require_primary_key":{"description":"Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.","type":"boolean","title":"sql_require_primary_key","example":true},"default_time_zone":{"description":"Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.","type":"string","minLength":2,"title":"default_time_zone","maxLength":100,"example":"+03:00"},"long_query_time":{"description":"The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s","maximum":3600,"type":"number","title":"long_query_time","minimum":0.0,"example":10}},"additionalProperties":false,"type":"object","title":"mysql.conf configuration values"}}}`) +var mysqlSettings = exoscalesdk.GetDBAASSettingsMysqlResponseSettings{ + Mysql: &exoscalesdk.GetDBAASSettingsMysqlResponseSettingsMysql{ + Properties: map[string]any{ + "net_write_timeout": map[string]any{ + "description": "The number of seconds to wait for a block to be written to a connection before aborting the write.", + "maximum": 3600, + "type": "integer", + "title": "net_write_timeout", + "minimum": 1, + "example": 30, + }, + "internal_tmp_mem_storage_engine": map[string]any{ + "description": "The storage engine for in-memory internal temporary tables.", + "enum": []string{ + "TempTable", + "MEMORY", + }, + "type": "string", + "title": "internal_tmp_mem_storage_engine", + "example": "TempTable", + }, + "sql_mode": map[string]any{ + "description": "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned.", + "type": "string", + "user_error": "Must be uppercase alphabetic characters, underscores and commas", + "title": "sql_mode", + "maxLength": 1024, + "example": "ANSI,TRADITIONAL", + "pattern": "^[A-Z_]*(,[A-Z_]+)*$", + }, + "information_schema_stats_expiry": map[string]any{ + "description": "The time, in seconds, before cached statistics expire", + "maximum": 31536000, + "type": "integer", + "title": "information_schema_stats_expiry", + "minimum": 900, + "example": 86400, + }, + "sort_buffer_size": map[string]any{ + "description": "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)", + "maximum": 1073741824, + "type": "integer", + "title": "sort_buffer_size", + "minimum": 32768, + "example": 262144, + }, + "innodb_thread_concurrency": map[string]any{ + "description": "Defines the maximum number of threads permitted inside of InnoDB. 
Default is 0 (infinite concurrency - no limit)", + "maximum": 1000, + "type": "integer", + "title": "innodb_thread_concurrency", + "minimum": 0, + "example": 10, + }, + "innodb_write_io_threads": map[string]any{ + "description": "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "maximum": 64, + "type": "integer", + "title": "innodb_write_io_threads", + "minimum": 1, + "example": 10, + }, + "innodb_ft_min_token_size": map[string]any{ + "description": "Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.", + "maximum": 16, + "type": "integer", + "title": "innodb_ft_min_token_size", + "minimum": 0, + "example": 3, + }, + "innodb_change_buffer_max_size": map[string]any{ + "description": "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25", + "maximum": 50, + "type": "integer", + "title": "innodb_change_buffer_max_size", + "minimum": 0, + "example": 30, + }, + "innodb_flush_neighbors": map[string]any{ + "description": "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + "maximum": 2, + "type": "integer", + "title": "innodb_flush_neighbors", + "minimum": 0, + "example": 0, + }, + "tmp_table_size": map[string]any{ + "description": "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M)", + "maximum": 1073741824, + "type": "integer", + "title": "tmp_table_size", + "minimum": 1048576, + "example": 16777216, + }, + "slow_query_log": map[string]any{ + "description": "Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off", + "type": "boolean", + "title": "slow_query_log", + "example": true, + }, + "connect_timeout": map[string]any{ + "description": "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + "maximum": 3600, + "type": "integer", + "title": "connect_timeout", + "minimum": 2, + "example": 10, + }, + "net_read_timeout": map[string]any{ + "description": "The number of seconds to wait for more data from a connection before aborting the read.", + "maximum": 3600, + "type": "integer", + "title": "net_read_timeout", + "minimum": 1, + "example": 30, + }, + "innodb_lock_wait_timeout": map[string]any{ + "description": "The length of time in seconds an InnoDB transaction waits for a row lock before giving up.", + "maximum": 3600, + "type": "integer", + "title": "innodb_lock_wait_timeout", + "minimum": 1, + "example": 50, + }, + "wait_timeout": map[string]any{ + "description": "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + "maximum": 2147483, + "type": "integer", + "title": "wait_timeout", + "minimum": 1, + "example": 28800, + }, + "innodb_rollback_on_timeout": map[string]any{ + "description": "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. 
Changing this parameter will lead to a restart of the MySQL service.", + "type": "boolean", + "title": "innodb_rollback_on_timeout", + "example": true, + }, + "group_concat_max_len": map[string]any{ + "description": "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + "maximum": "18446744073709551615", + "type": "integer", + "title": "group_concat_max_len", + "minimum": 4, + "example": 1024, + }, + "net_buffer_length": map[string]any{ + "description": "Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.", + "maximum": 1048576, + "type": "integer", + "title": "net_buffer_length", + "minimum": 1024, + "example": 16384, + }, + "innodb_print_all_deadlocks": map[string]any{ + "description": "When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.", + "type": "boolean", + "title": "innodb_print_all_deadlocks", + "example": true, + }, + "innodb_online_alter_log_max_size": map[string]any{ + "description": "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + "maximum": 1099511627776, + "type": "integer", + "title": "innodb_online_alter_log_max_size", + "minimum": 65536, + "example": 134217728, + }, + "interactive_timeout": map[string]any{ + "description": "The number of seconds the server waits for activity on an interactive connection before closing it.", + "maximum": 604800, + "type": "integer", + "title": "interactive_timeout", + "minimum": 30, + "example": 3600, + }, + "innodb_log_buffer_size": map[string]any{ + "description": "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + "maximum": 4294967295, + "type": "integer", + "title": "innodb_log_buffer_size", + "minimum": 1048576, + "example": 16777216, + }, + "max_allowed_packet": map[string]any{ + "description": "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)", + "maximum": 1073741824, + "type": "integer", + "title": "max_allowed_packet", + "minimum": 102400, + "example": 67108864, + }, + "max_heap_table_size": map[string]any{ + "description": "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M)", + "maximum": 1073741824, + "type": "integer", + "title": "max_heap_table_size", + "minimum": 1048576, + "example": 16777216, + }, + "innodb_ft_server_stopword_table": map[string]any{ + "description": "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + "type": []string{ + "null", + "string", + }, + "title": "innodb_ft_server_stopword_table", + "maxLength": 1024, + "example": "db_name/table_name", + "pattern": "^.+/.+$", + }, + "innodb_read_io_threads": map[string]any{ + "description": "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "maximum": 64, + "type": "integer", + "title": "innodb_read_io_threads", + "minimum": 1, + "example": 10, + }, + "sql_require_primary_key": map[string]any{ + "description": "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. 
It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + "type": "boolean", + "title": "sql_require_primary_key", + "example": true, + }, + "default_time_zone": map[string]any{ + "description": "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + "type": "string", + "minLength": 2, + "title": "default_time_zone", + "maxLength": 100, + "example": "+03:00", + }, + "long_query_time": map[string]any{ + "description": "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s", + "maximum": 3600, + "type": "number", + "title": "long_query_time", + "minimum": 0.0, + "example": 10, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "mysql.conf configuration values", + }, +} diff --git a/operator/mysqlcontroller/update.go b/operator/mysqlcontroller/update.go index 5c895d49..41516b51 100644 --- a/operator/mysqlcontroller/update.go +++ b/operator/mysqlcontroller/update.go @@ -2,12 +2,14 @@ package mysqlcontroller import ( "context" + "encoding/json" "fmt" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/mapper" controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -21,39 +23,36 @@ func (p *pipeline) Update(ctx context.Context, mg resource.Managed) (managed.Ext spec := mySQLInstance.Spec.ForProvider ipFilter := []string(spec.IPFilter) - settings, err := mapper.ToMap(spec.MySQLSettings) - if err != nil { - return managed.ExternalUpdate{}, fmt.Errorf("cannot map mySQLInstance settings: %w", err) + settings := exoscalesdk.JSONSchemaMysql{} + if len(spec.MySQLSettings.Raw) != 0 { + err := json.Unmarshal(spec.MySQLSettings.Raw, &settings) + if err != nil { + return managed.ExternalUpdate{}, fmt.Errorf("cannot map mySQLInstance settings: %w", err) + } } backupSchedule, err := mapper.ToBackupSchedule(spec.Backup.TimeOfDay) if err != nil { return managed.ExternalUpdate{}, fmt.Errorf("cannot parse mySQLInstance backup schedule: %w", err) } - body := oapi.UpdateDbaasServiceMysqlJSONRequestBody{ - Maintenance: &struct { - Dow oapi.UpdateDbaasServiceMysqlJSONBodyMaintenanceDow `json:"dow"` - Time string `json:"time"` - }{ - Dow: oapi.UpdateDbaasServiceMysqlJSONBodyMaintenanceDow(spec.Maintenance.DayOfWeek), + body := exoscalesdk.UpdateDBAASServiceMysqlRequest{ + Maintenance: &exoscalesdk.UpdateDBAASServiceMysqlRequestMaintenance{ + Dow: exoscalesdk.UpdateDBAASServiceMysqlRequestMaintenanceDow(spec.Maintenance.DayOfWeek), Time: spec.Maintenance.TimeOfDay.String(), }, - BackupSchedule: &struct { - BackupHour *int64 `json:"backup-hour,omitempty"` - BackupMinute *int64 `json:"backup-minute,omitempty"` - }{ + BackupSchedule: &exoscalesdk.UpdateDBAASServiceMysqlRequestBackupSchedule{ BackupHour: backupSchedule.BackupHour, BackupMinute: backupSchedule.BackupMinute, }, TerminationProtection: &spec.TerminationProtection, - Plan: &spec.Size.Plan, - IpFilter: &ipFilter, - MysqlSettings: &settings, + Plan: spec.Size.Plan, + IPFilter: ipFilter, + MysqlSettings: settings, } - resp, err := p.exo.UpdateDbaasServiceMysqlWithResponse(ctx, oapi.DbaasServiceName(mySQLInstance.GetInstanceName()), body) + resp, err := 
p.exo.UpdateDBAASServiceMysql(ctx, mySQLInstance.GetInstanceName(), body) if err != nil { return managed.ExternalUpdate{}, fmt.Errorf("cannot update mySQLInstance: %w", err) } - log.V(1).Info("response", "body", string(resp.Body)) + log.V(1).Info("response", "message", string(resp.Message)) return managed.ExternalUpdate{}, nil } diff --git a/operator/mysqlcontroller/webhook.go b/operator/mysqlcontroller/webhook.go index 9c6912b7..f415f00d 100644 --- a/operator/mysqlcontroller/webhook.go +++ b/operator/mysqlcontroller/webhook.go @@ -3,11 +3,13 @@ package mysqlcontroller import ( "context" "fmt" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/go-logr/logr" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/common" "github.com/vshn/provider-exoscale/operator/pipelineutil" "github.com/vshn/provider-exoscale/operator/webhook" "k8s.io/apimachinery/pkg/runtime" @@ -36,7 +38,7 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm return nil, err } - err = v.validateVersion(ctx, obj, *availableVersions) + err = v.validateVersion(ctx, obj, availableVersions) if err != nil { return nil, err } @@ -44,31 +46,30 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm return nil, validateSpec(mySQLInstance) } -func (v *Validator) getAvailableVersions(ctx context.Context, obj runtime.Object) (*[]string, error) { +func (v *Validator) getAvailableVersions(ctx context.Context, obj runtime.Object) ([]string, error) { mySQLInstance := obj.(*exoscalev1.MySQL) v.log.V(1).Info("get mysql available versions") - exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, mySQLInstance.GetProviderConfigName(), exoscalesdk.ClientOptWithAPIEndpoint(fmt.Sprintf("https://api-%s.exoscale.com", mySQLInstance.Spec.ForProvider.Zone))) + exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, mySQLInstance.GetProviderConfigName(), exoscalesdk.ClientOptWithEndpoint(common.ZoneTranslation[mySQLInstance.Spec.ForProvider.Zone])) if err != nil { return nil, fmt.Errorf("open exoscale client failed: %w", err) } // get mysql available versions - resp, err := exo.Exoscale.GetDbaasServiceTypeWithResponse(ctx, serviceType) + resp, err := exo.Exoscale.GetDBAASServiceType(ctx, serviceType) if err != nil { return nil, fmt.Errorf("get DBaaS service type failed: %w", err) } - v.log.V(1).Info("DBaaS service type", "body", string(resp.Body)) + v.log.V(1).Info("DBaaS service type", "name", string(resp.Name), "description", string(resp.Description)) - serviceType := *resp.JSON200 - if serviceType.AvailableVersions == nil { + if resp.AvailableVersions == nil { return nil, fmt.Errorf("mysql available versions not found") } - return serviceType.AvailableVersions, nil + return resp.AvailableVersions, nil } -func (v *Validator) validateVersion(ctx context.Context, obj runtime.Object, availableVersions []string) error { +func (v *Validator) validateVersion(_ context.Context, obj runtime.Object, availableVersions []string) error { mySQLInstance := obj.(*exoscalev1.MySQL) v.log.V(1).Info("validate version") diff --git a/operator/opensearchcontroller/connector.go b/operator/opensearchcontroller/connector.go index 06aa332d..ea5fd204 100644 --- a/operator/opensearchcontroller/connector.go +++ b/operator/opensearchcontroller/connector.go @@ -2,13 +2,13 @@ package opensearchcontroller import ( "context" - "fmt" 
"github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/common" "github.com/vshn/provider-exoscale/operator/pipelineutil" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -25,7 +25,7 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E openSearchInstance := mg.(*exoscalev1.OpenSearch) - exo, err := pipelineutil.OpenExoscaleClient(ctx, c.Kube, openSearchInstance.GetProviderConfigReference().Name, exoscalesdk.ClientOptWithAPIEndpoint(fmt.Sprintf("https://api-%s.exoscale.com", openSearchInstance.Spec.ForProvider.Zone))) + exo, err := pipelineutil.OpenExoscaleClient(ctx, c.Kube, openSearchInstance.GetProviderConfigReference().Name, exoscalesdk.ClientOptWithEndpoint(common.ZoneTranslation[openSearchInstance.Spec.ForProvider.Zone])) if err != nil { return nil, err } diff --git a/operator/opensearchcontroller/create.go b/operator/opensearchcontroller/create.go index d20889a1..2173fa80 100644 --- a/operator/opensearchcontroller/create.go +++ b/operator/opensearchcontroller/create.go @@ -2,13 +2,14 @@ package opensearchcontroller import ( "context" + "encoding/json" "fmt" - "github.com/vshn/provider-exoscale/operator/mapper" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -18,31 +19,30 @@ func (p *pipeline) Create(ctx context.Context, mg resource.Managed) (managed.Ext openSearch := mg.(*exoscalev1.OpenSearch) forProvider := openSearch.Spec.ForProvider ipFilter := []string(forProvider.IPFilter) - settings, err := mapper.ToMap(forProvider.OpenSearchSettings) - if err != nil { - log.V(1).Error(err, "error parsing settings in OpenSearch/Create") - return managed.ExternalCreation{}, err + settings := exoscalesdk.JSONSchemaOpensearch{} + if len(forProvider.OpenSearchSettings.Raw) != 0 { + err := json.Unmarshal(forProvider.OpenSearchSettings.Raw, &settings) + if err != nil { + return managed.ExternalCreation{}, fmt.Errorf("cannot map opensearchInstance settings: %w", err) + } } - body := oapi.CreateDbaasServiceOpensearchJSONRequestBody{ + body := exoscalesdk.CreateDBAASServiceOpensearchRequest{ Plan: forProvider.Size.Plan, - IpFilter: &ipFilter, - Maintenance: &struct { - Dow oapi.CreateDbaasServiceOpensearchJSONBodyMaintenanceDow `json:"dow"` - Time string `json:"time"` - }{ - Dow: oapi.CreateDbaasServiceOpensearchJSONBodyMaintenanceDow(forProvider.Maintenance.DayOfWeek), + IPFilter: ipFilter, + Maintenance: &exoscalesdk.CreateDBAASServiceOpensearchRequestMaintenance{ + Dow: exoscalesdk.CreateDBAASServiceOpensearchRequestMaintenanceDow(forProvider.Maintenance.DayOfWeek), Time: forProvider.Maintenance.TimeOfDay.String(), }, - OpensearchSettings: &settings, + OpensearchSettings: settings, TerminationProtection: &forProvider.TerminationProtection, // majorVersion can be only major: ['1','2'] - Version: &forProvider.MajorVersion, + Version: forProvider.MajorVersion, } - resp, err := p.exo.CreateDbaasServiceOpensearchWithResponse(ctx, 
oapi.DbaasServiceName(openSearch.GetInstanceName()), body) + resp, err := p.exo.CreateDBAASServiceOpensearch(ctx, openSearch.GetInstanceName(), body) if err != nil { return managed.ExternalCreation{}, fmt.Errorf("cannot create OpenSearch Instance: %v, \nerr: %w", openSearch.GetInstanceName(), err) } - log.V(1).Info("resource created", "body", string(resp.Body)) + log.V(1).Info("resource created", "message", resp.Message) return managed.ExternalCreation{}, nil } diff --git a/operator/opensearchcontroller/delete.go b/operator/opensearchcontroller/delete.go index d7423d6c..6e3d7e90 100644 --- a/operator/opensearchcontroller/delete.go +++ b/operator/opensearchcontroller/delete.go @@ -15,10 +15,10 @@ func (p *pipeline) Delete(ctx context.Context, mg resource.Managed) error { log.Info("deleting resource") openSearch := mg.(*exoscalev1.OpenSearch) - resp, err := p.exo.DeleteDbaasServiceWithResponse(ctx, openSearch.GetInstanceName()) + resp, err := p.exo.DeleteDBAASServiceOpensearch(ctx, openSearch.GetInstanceName()) if err != nil { return fmt.Errorf("cannot delete OpenSearch: %w", err) } - log.V(1).Info("response", "json", string(resp.Body)) + log.V(1).Info("response", "message", string(resp.Message)) return nil } diff --git a/operator/opensearchcontroller/observe.go b/operator/opensearchcontroller/observe.go index 47f56010..e35f3468 100644 --- a/operator/opensearchcontroller/observe.go +++ b/operator/opensearchcontroller/observe.go @@ -2,17 +2,19 @@ package opensearchcontroller import ( "context" + "encoding/json" "errors" "fmt" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" exoscaleapi "github.com/exoscale/egoscale/v2/api" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" + "k8s.io/apimachinery/pkg/runtime" + "github.com/go-logr/logr" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" - "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -23,7 +25,7 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex openSearchInstance := mg.(*exoscalev1.OpenSearch) - resp, err := p.exo.GetDbaasServiceOpensearchWithResponse(ctx, oapi.DbaasServiceName(openSearchInstance.GetInstanceName())) + opensearch, err := p.exo.GetDBAASServiceOpensearch(ctx, openSearchInstance.GetInstanceName()) if err != nil { if errors.Is(err, exoscaleapi.ErrNotFound) { return managed.ExternalObservation{ResourceExists: false}, nil @@ -31,32 +33,30 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{}, fmt.Errorf("cannot observe openSearchInstance: %w", err) } - opensearch := *resp.JSON200 - log.V(2).Info("response", "raw", string(resp.Body)) log.V(1).Info("retrieved openSearchInstance", "state", opensearch.State) openSearchInstance.Status.AtProvider, err = mapObservation(opensearch) if err != nil { log.Error(err, "cannot map openSearchInstance observation, ignoring") } - var state oapi.EnumServiceState - if opensearch.State != nil { - state = *opensearch.State + var state exoscalesdk.EnumServiceState + if opensearch.State != "" { + state = opensearch.State } switch state { - case oapi.EnumServiceStateRunning: + case exoscalesdk.EnumServiceStateRunning: openSearchInstance.SetConditions(exoscalev1.Running()) - case oapi.EnumServiceStateRebuilding: + case exoscalesdk.EnumServiceStateRebuilding: 
openSearchInstance.SetConditions(exoscalev1.Rebuilding()) - case oapi.EnumServiceStatePoweroff: + case exoscalesdk.EnumServiceStatePoweroff: openSearchInstance.SetConditions(exoscalev1.PoweredOff()) - case oapi.EnumServiceStateRebalancing: + case exoscalesdk.EnumServiceStateRebalancing: openSearchInstance.SetConditions(exoscalev1.Rebalancing()) default: log.V(2).Info("ignoring unknown openSearchInstance state", "state", state) } - connDetails, err := connectionDetails(opensearch) + connDetails, err := connectionDetails(ctx, opensearch, p.exo) if err != nil { return managed.ExternalObservation{}, fmt.Errorf("cannot parse connection details: %w", err) } @@ -66,7 +66,7 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{}, fmt.Errorf("cannot parse parameters: %w", err) } - currentParams, err := setSettingsDefaults(ctx, p.exo, &openSearchInstance.Spec.ForProvider) + currentParams, err := setSettingsDefaults(ctx, *p.exo, &openSearchInstance.Spec.ForProvider) if err != nil { log.Error(err, "unable to set opensearch settings schema") currentParams = &openSearchInstance.Spec.ForProvider @@ -79,16 +79,20 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex }, nil } -func connectionDetails(in oapi.DbaasServiceOpensearch) (managed.ConnectionDetails, error) { - uriParams := *in.UriParams +func connectionDetails(ctx context.Context, in *exoscalesdk.DBAASServiceOpensearch, client *exoscalesdk.Client) (managed.ConnectionDetails, error) { + uriParams := *&in.URIParams + password, err := client.RevealDBAASOpensearchUserPassword(ctx, string(in.Name), in.ConnectionInfo.Username) + if err != nil { + return nil, fmt.Errorf("cannot reveal password for OpenSearch instance: %w", err) + } return map[string][]byte{ - "OPENSEARCH_USER": []byte(*in.ConnectionInfo.Username), - "OPENSEARCH_PASSWORD": []byte(*in.ConnectionInfo.Password), + "OPENSEARCH_USER": []byte(in.ConnectionInfo.Username), + "OPENSEARCH_PASSWORD": []byte(password.Password), "OPENSEARCH_HOST": []byte(uriParams["host"].(string)), "OPENSEARCH_PORT": []byte(uriParams["port"].(string)), - "OPENSEARCH_URI": []byte(*in.Uri), - "OPENSEARCH_DASHBOARD_URI": []byte(*in.ConnectionInfo.DashboardUri), + "OPENSEARCH_URI": []byte(in.URI), + "OPENSEARCH_DASHBOARD_URI": []byte(in.ConnectionInfo.DashboardURI), }, nil } @@ -116,20 +120,25 @@ func isUpToDate(current, external *exoscalev1.OpenSearchParameters, log logr.Log return ok } -func mapObservation(instance oapi.DbaasServiceOpensearch) (exoscalev1.OpenSearchObservation, error) { +func mapObservation(instance *exoscalesdk.DBAASServiceOpensearch) (exoscalev1.OpenSearchObservation, error) { observation := exoscalev1.OpenSearchObservation{ - MajorVersion: ptr.Deref(instance.Version, ""), + MajorVersion: instance.Version, NodeStates: mapper.ToNodeStates(instance.NodeStates), } - settings, err := mapper.ToRawExtension(instance.OpensearchSettings) + jsonSettings, err := json.Marshal(instance.OpensearchSettings) + if err != nil { + return exoscalev1.OpenSearchObservation{}, fmt.Errorf("error parsing OpenSearchSettings") + } + + settings := runtime.RawExtension{Raw: jsonSettings} if err != nil { return observation, fmt.Errorf("openSearchInstance settings: %w", err) } observation.OpenSearchSettings = settings observation.Maintenance = mapper.ToMaintenance(instance.Maintenance) - observation.DBaaSParameters = mapper.ToDBaaSParameters(instance.TerminationProtection, instance.Plan, instance.IpFilter) + observation.DBaaSParameters 
= mapper.ToDBaaSParameters(instance.TerminationProtection, instance.Plan, &instance.IPFilter) notifications, err := mapper.ToNotifications(instance.Notifications) if err != nil { return observation, fmt.Errorf("openSearchInstance notifications: %w", err) @@ -139,11 +148,15 @@ func mapObservation(instance oapi.DbaasServiceOpensearch) (exoscalev1.OpenSearch return observation, nil } -func mapParameters(in oapi.DbaasServiceOpensearch, zone string) (*exoscalev1.OpenSearchParameters, error) { - settings, err := mapper.ToRawExtension(in.OpensearchSettings) +func mapParameters(in *exoscalesdk.DBAASServiceOpensearch, zone string) (*exoscalev1.OpenSearchParameters, error) { + jsonSettings, err := json.Marshal(in.OpensearchSettings) + if err != nil { return nil, fmt.Errorf("cannot parse openSearchInstance settings: %w", err) } + + settings := runtime.RawExtension{Raw: jsonSettings} + return &exoscalev1.OpenSearchParameters{ Maintenance: exoscalev1.MaintenanceSpec{ DayOfWeek: in.Maintenance.Dow, @@ -154,7 +167,7 @@ func mapParameters(in oapi.DbaasServiceOpensearch, zone string) (*exoscalev1.Ope Size: exoscalev1.SizeSpec{ Plan: in.Plan, }, - IPFilter: mapper.ToSlice(in.IpFilter), + IPFilter: in.IPFilter, }, OpenSearchSettings: settings, }, nil diff --git a/operator/opensearchcontroller/pipeline.go b/operator/opensearchcontroller/pipeline.go index 34105869..a53dc6b7 100644 --- a/operator/opensearchcontroller/pipeline.go +++ b/operator/opensearchcontroller/pipeline.go @@ -2,7 +2,7 @@ package opensearchcontroller import ( "github.com/crossplane/crossplane-runtime/pkg/event" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/operator/opensearchcontroller/settings.go b/operator/opensearchcontroller/settings.go index 47fe2720..001bd12e 100644 --- a/operator/opensearchcontroller/settings.go +++ b/operator/opensearchcontroller/settings.go @@ -2,15 +2,16 @@ package opensearchcontroller import ( "context" + "encoding/json" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - "github.com/exoscale/egoscale/v2/oapi" "github.com/vshn/provider-exoscale/internal/settings" ) type settingsFetcher interface { - GetDbaasSettingsOpensearchWithResponse(ctx context.Context) (*oapi.GetDbaasSettingsOpensearchResponse, error) + GetDBAASSettingsOpensearch(ctx context.Context) (*exoscalesdk.GetDBAASSettingsOpensearchResponse, error) } func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1.OpenSearchParameters) (*exoscalev1.OpenSearchParameters, error) { @@ -29,11 +30,15 @@ func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1. 
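The hunk body below reworks fetchSettingSchema: the v3 client returns a typed GetDBAASSettings* response instead of raw bytes, so the response is marshalled back to JSON before being handed to settings.ParseSchemas. A minimal sketch of that bridge, assuming only what the hunks show; settingsResponse, fetcher and fetchSchemaJSON are illustrative names, not the actual API:

package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// settingsResponse stands in for a typed GetDBAASSettings* response.
type settingsResponse struct {
	Settings map[string]any `json:"settings"`
}

// fetcher stands in for the settingsFetcher interface the controllers define
// so that tests can substitute a fake client.
type fetcher interface {
	GetSettings(ctx context.Context) (*settingsResponse, error)
}

// fetchSchemaJSON mirrors the new flow: call the typed endpoint, then marshal
// the typed response back to JSON so an existing JSON-schema parser
// (settings.ParseSchemas in the real code) can stay unchanged.
func fetchSchemaJSON(ctx context.Context, f fetcher) ([]byte, error) {
	resp, err := f.GetSettings(ctx)
	if err != nil {
		return nil, err
	}
	return json.Marshal(resp)
}

// fakeFetcher shows why the interface matters: unit tests can return a fixed
// schema without talking to the API, as the *_test.go hunks do.
type fakeFetcher struct{}

func (fakeFetcher) GetSettings(context.Context) (*settingsResponse, error) {
	return &settingsResponse{Settings: map[string]any{"mysql": map[string]any{"type": "object"}}}, nil
}

func main() {
	raw, err := fetchSchemaJSON(context.Background(), fakeFetcher{})
	fmt.Println(string(raw), err)
}

The marshal/parse round-trip keeps the schema-defaulting code untouched at the cost of one extra serialization per reconcile, which appears to be the trade-off these hunks accept.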
} func fetchSettingSchema(ctx context.Context, f settingsFetcher) (settings.Schemas, error) { - resp, err := f.GetDbaasSettingsOpensearchWithResponse(ctx) + resp, err := f.GetDBAASSettingsOpensearch(ctx) if err != nil { return nil, err } - schemas, err := settings.ParseSchemas(resp.Body) + settingsJson, err := json.Marshal(resp) + if err != nil { + return nil, err + } + schemas, err := settings.ParseSchemas(settingsJson) if err != nil { return nil, err } diff --git a/operator/opensearchcontroller/settings_test.go b/operator/opensearchcontroller/settings_test.go index 39685e78..6fa02a37 100644 --- a/operator/opensearchcontroller/settings_test.go +++ b/operator/opensearchcontroller/settings_test.go @@ -4,19 +4,20 @@ import ( "context" "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" ) type fakeSettingsFetcher struct{} -func (fakeSettingsFetcher) GetDbaasSettingsOpensearchWithResponse(ctx context.Context, reqEditors ...oapi.RequestEditorFn) (*oapi.GetDbaasSettingsOpensearchResponse, error) { - return &oapi.GetDbaasSettingsOpensearchResponse{ - Body: rawResponse, +func (fakeSettingsFetcher) GetDBAASSettingsOpensearch(ctx context.Context) (*exoscalesdk.GetDBAASSettingsOpensearchResponse, error) { + return &exoscalesdk.GetDBAASSettingsOpensearchResponse{ + Settings: &opensearchSettings, }, nil } @@ -43,10 +44,266 @@ func TestDefaultSettings(t *testing.T) { withDefaults, err := setSettingsDefaults(context.Background(), fakeSettingsFetcher{}, &found) require.NoError(t, err, "failed to set defaults") - setingsWithDefaults, err := mapper.ToMap(withDefaults.OpenSearchSettings) + settingsWithDefaults, err := mapper.ToMap(withDefaults.OpenSearchSettings) require.NoError(t, err, "failed to parse set defaults") - assert.EqualValues(t, 42, setingsWithDefaults["thread_pool_search_throttled_size"]) - assert.Len(t, setingsWithDefaults, 1) + assert.EqualValues(t, 42, settingsWithDefaults["thread_pool_search_throttled_size"]) + assert.Len(t, settingsWithDefaults, 1) } -var rawResponse = []byte(`{"settings":{"opensearch":{"properties":{"thread_pool_search_throttled_size":{"description":"Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.","maximum":128,"type":"integer","title":"search_throttled thread pool size","minimum":1},"thread_pool_analyze_size":{"description":"Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.","maximum":128,"type":"integer","title":"analyze thread pool size","minimum":1},"thread_pool_get_size":{"description":"Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.","maximum":128,"type":"integer","title":"get thread pool size","minimum":1},"thread_pool_get_queue_size":{"description":"Size for the thread pool queue. 
See documentation for exact details.","maximum":2000,"type":"integer","title":"get thread pool queue size","minimum":10},"indices_recovery_max_concurrent_file_chunks":{"description":"Number of file chunks sent in parallel for each recovery. Defaults to 2.","maximum":5,"type":"integer","title":"indices.recovery.max_concurrent_file_chunks","minimum":2},"indices_queries_cache_size":{"description":"Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.","maximum":40,"type":"integer","title":"indices.queries.cache.size","minimum":3},"thread_pool_search_size":{"description":"Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.","maximum":128,"type":"integer","title":"search thread pool size","minimum":1},"indices_recovery_max_bytes_per_sec":{"description":"Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb","maximum":400,"type":"integer","title":"indices.recovery.max_bytes_per_sec","minimum":40},"http_max_initial_line_length":{"description":"The max length of an HTTP URL, in bytes","maximum":65536,"type":"integer","title":"http.max_initial_line_length","minimum":1024,"example":4096},"thread_pool_write_queue_size":{"description":"Size for the thread pool queue. See documentation for exact details.","maximum":2000,"type":"integer","title":"write thread pool queue size","minimum":10},"script_max_compilations_rate":{"description":"Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context","type":"string","title":"Script max compilation rate - circuit breaker to prevent/minimize OOMs","maxLength":1024,"example":"75/5m"},"search_max_buckets":{"description":"Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined.","maximum":20000,"type":["integer","null"],"title":"search.max_buckets","minimum":1,"example":10000},"reindex_remote_whitelist":{"description":"Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.","type":["array","null"],"title":"reindex_remote_whitelist","items":{"type":["string","null"],"title":"Address (hostname:port or IP:port)","maxLength":261,"example":"anotherservice.aivencloud.com:12398"},"maxItems":32},"override_main_response_version":{"description":"Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. 
Default is false","type":"boolean","title":"compatibility.override_main_response_version","example":true},"http_max_header_size":{"description":"The max size of allowed headers, in bytes","maximum":262144,"type":"integer","title":"http.max_header_size","minimum":1024,"example":8192},"email_sender_name":{"description":"This should be identical to the Sender name defined in Opensearch dashboards","type":["string"],"user_error":"Must consist of lower-case alpha-numeric characters and dashes, max 40 characters","title":"Sender email name placeholder to be used in Opensearch Dashboards and Opensearch keystore","maxLength":40,"example":"alert-sender","pattern":"^[a-zA-Z0-9-_]+$"},"indices_fielddata_cache_size":{"description":"Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.","default":null,"maximum":100,"type":["integer","null"],"title":"indices.fielddata.cache.size","minimum":3},"action_destructive_requires_name":{"type":["boolean","null"],"title":"Require explicit index names when deleting","example":true},"email_sender_username":{"type":["string"],"user_error":"Must be a valid email address","title":"Sender email address for Opensearch alerts","maxLength":320,"example":"jane@example.com","pattern":"^[A-Za-z0-9_\\-\\.+\\'&]+@(([\\da-zA-Z])([_\\w-]{,62})\\.){,127}(([\\da-zA-Z])[_\\w-]{,61})?([\\da-zA-Z]\\.((xn\\-\\-[a-zA-Z\\d]+)|([a-zA-Z\\d]{2,})))$"},"indices_memory_index_buffer_size":{"description":"Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.","maximum":40,"type":"integer","title":"indices.memory.index_buffer_size","minimum":3},"thread_pool_force_merge_size":{"description":"Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.","maximum":128,"type":"integer","title":"force_merge thread pool size","minimum":1},"cluster_routing_allocation_node_concurrent_recoveries":{"description":"How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2.","maximum":16,"type":"integer","title":"Concurrent incoming/outgoing shard recoveries per node","minimum":2},"email_sender_password":{"description":"Sender email password for Opensearch alerts to authenticate with SMTP server","type":["string"],"title":"Sender email password for Opensearch alerts to authenticate with SMTP server","maxLength":1024,"example":"very-secure-mail-password","pattern":"^[^\\x00-\\x1F]+$"},"thread_pool_analyze_queue_size":{"description":"Size for the thread pool queue. See documentation for exact details.","maximum":2000,"type":"integer","title":"analyze thread pool queue size","minimum":10},"action_auto_create_index_enabled":{"description":"Explicitly allow or block automatic creation of indices. 
Defaults to true","type":"boolean","title":"action.auto_create_index","example":false},"http_max_content_length":{"description":"Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.","maximum":2147483647,"type":"integer","title":"http.max_content_length","minimum":1},"thread_pool_write_size":{"description":"Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.","maximum":128,"type":"integer","title":"write thread pool size","minimum":1},"thread_pool_search_queue_size":{"description":"Size for the thread pool queue. See documentation for exact details.","maximum":2000,"type":"integer","title":"search thread pool queue size","minimum":10},"indices_query_bool_max_clause_count":{"description":"Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.","maximum":4096,"type":"integer","title":"indices.query.bool.max_clause_count","minimum":64},"thread_pool_search_throttled_queue_size":{"description":"Size for the thread pool queue. See documentation for exact details.","maximum":2000,"type":"integer","title":"search_throttled thread pool queue size","minimum":10},"cluster_max_shards_per_node":{"description":"Controls the number of shards allowed in the cluster per data node","maximum":10000,"type":"integer","title":"cluster.max_shards_per_node","minimum":100,"example":1000}},"additionalProperties":false,"type":"object","title":"OpenSearch settings","dependencies":{"email_sender_name":{"required":["email_sender_username","email_sender_password"]},"email_sender_username":{"required":["email_sender_name","email_sender_password"]},"email_sender_password":{"required":["email_sender_username","email_sender_name"]}}}}}`) +var opensearchSettings = exoscalesdk.GetDBAASSettingsOpensearchResponseSettings{ + Opensearch: &exoscalesdk.GetDBAASSettingsOpensearchResponseSettingsOpensearch{ + Properties: map[string]any{ + "thread_pool_search_throttled_size": map[string]any{ + "description": "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + "maximum": 128, + "type": "integer", + "title": "search_throttled thread pool size", + "minimum": 1, + }, + "thread_pool_analyze_size": map[string]any{ + "description": "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + "maximum": 128, + "type": "integer", + "title": "analyze thread pool size", + "minimum": 1, + }, + "thread_pool_get_size": map[string]any{ + "description": "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + "maximum": 128, + "type": "integer", + "title": "get thread pool size", + "minimum": 1, + }, + "thread_pool_get_queue_size": map[string]any{ + "description": "Size for the thread pool queue. 
See documentation for exact details.", + "maximum": 2000, + "type": "integer", + "title": "get thread pool queue size", + "minimum": 10, + }, + "indices_recovery_max_concurrent_file_chunks": map[string]any{ + "description": "Number of file chunks sent in parallel for each recovery. Defaults to 2.", + "maximum": 5, + "type": "integer", + "title": "indices.recovery.max_concurrent_file_chunks", + "minimum": 2, + }, + "indices_queries_cache_size": map[string]any{ + "description": "Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.", + "maximum": 40, + "type": "integer", + "title": "indices.queries.cache.size", + "minimum": 3, + }, + "thread_pool_search_size": map[string]any{ + "description": "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + "maximum": 128, + "type": "integer", + "title": "search thread pool size", + "minimum": 1, + }, + "indices_recovery_max_bytes_per_sec": map[string]any{ + "description": "Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb", + "maximum": 400, + "type": "integer", + "title": "indices.recovery.max_bytes_per_sec", + "minimum": 40, + }, + "http_max_initial_line_length": map[string]any{ + "description": "The max length of an HTTP URL, in bytes", + "maximum": 65536, + "type": "integer", + "title": "http.max_initial_line_length", + "minimum": 1024, + "example": 4096, + }, + "thread_pool_write_queue_size": map[string]any{ + "description": "Size for the thread pool queue. See documentation for exact details.", + "maximum": 2000, + "type": "integer", + "title": "write thread pool queue size", + "minimum": 10, + }, + "script_max_compilations_rate": map[string]any{ + "description": "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context", + "type": "string", + "title": "Script max compilation rate - circuit breaker to prevent/minimize OOMs", + "maxLength": 1024, + "example": "75/5m", + }, + "search_max_buckets": map[string]any{ + "description": "Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined.", + "maximum": 20000, + "type": []string{ + "integer", + "null", + }, + "title": "search.max_buckets", + "minimum": 1, + "example": 10000, + }, + "reindex_remote_whitelist": map[string]any{ + "description": "Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.", + "type": []string{ + "array", + "null", + }, + "title": "reindex_remote_whitelist", + "items": map[string]any{ + "type": []string{ + "string", + "null", + }, + "title": "Address (hostname:port or IP:port)", + "maxLength": 261, + "example": "anotherservice.aivencloud.com:12398", + }, + "maxItems": 32, + }, + "override_main_response_version": map[string]any{ + "description": "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. 
Default is false", + "type": "boolean", + "title": "compatibility.override_main_response_version", + "example": true, + }, + "http_max_header_size": map[string]any{ + "description": "The max size of allowed headers, in bytes", + "maximum": 262144, + "type": "integer", + "title": "http.max_header_size", + "minimum": 1024, + "example": 8192, + }, + "email_sender_name": map[string]any{ + "description": "This should be identical to the Sender name defined in Opensearch dashboards", + "type": []string{ + "string", + }, + "user_error": "Must consist of lower-case alpha-numeric characters and dashes, max 40 characters", + "title": "Sender email name placeholder to be used in Opensearch Dashboards and Opensearch keystore", + "maxLength": 40, + "example": "alert-sender", + "pattern": "^[a-zA-Z0-9-_]+$", + }, + "indices_fielddata_cache_size": map[string]any{ + "description": "Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.", + "default": "null", + "maximum": 100, + "type": []string{ + "integer", + "null", + }, + "title": "indices.fielddata.cache.size", + "minimum": 3, + }, + "action_destructive_requires_name": map[string]any{ + "type": []string{ + "boolean", + "null", + }, + "title": "Require explicit index names when deleting", + "example": true, + }, + "email_sender_username": map[string]any{ + "type": []string{ + "string", + }, + "user_error": "Must be a valid email address", + "title": "Sender email address for Opensearch alerts", + "maxLength": 320, + "example": "jane@example.com", + "pattern": "^[A-Za-z0-9_\\-\\.+\\'&]+@(([\\da-zA-Z])([_\\w-]{,62})\\.){,127}(([\\da-zA-Z])[_\\w-]{,61})?([\\da-zA-Z]\\.((xn\\-\\-[a-zA-Z\\d]+)|([a-zA-Z\\d]{2,})))$", + }, + "indices_memory_index_buffer_size": map[string]any{ + "description": "Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.", + "maximum": 40, + "type": "integer", + "title": "indices.memory.index_buffer_size", + "minimum": 3, + }, + "thread_pool_force_merge_size": map[string]any{ + "description": "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + "maximum": 128, + "type": "integer", + "title": "force_merge thread pool size", + "minimum": 1, + }, + "cluster_routing_allocation_node_concurrent_recoveries": map[string]any{ + "description": "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2.", + "maximum": 16, + "type": "integer", + "title": "Concurrent incoming/outgoing shard recoveries per node", + "minimum": 2, + }, + "email_sender_password": map[string]any{ + "description": "Sender email password for Opensearch alerts to authenticate with SMTP server", + "type": []string{ + "string", + }, + "title": "Sender email password for Opensearch alerts to authenticate with SMTP server", + "maxLength": 1024, + "example": "very-secure-mail-password", + "pattern": "^[^\\x00-\\x1F]+$", + }, + "thread_pool_analyze_queue_size": map[string]any{ + "description": "Size for the thread pool queue. 
See documentation for exact details.", + "maximum": 2000, + "type": "integer", + "title": "analyze thread pool queue size", + "minimum": 10, + }, + "action_auto_create_index_enabled": map[string]any{ + "description": "Explicitly allow or block automatic creation of indices. Defaults to true", + "type": "boolean", + "title": "action.auto_create_index", + "example": false, + }, + "http_max_content_length": map[string]any{ + "description": "Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.", + "maximum": 2147483647, + "type": "integer", + "title": "http.max_content_length", + "minimum": 1, + }, + "thread_pool_write_size": map[string]any{ + "description": "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + "maximum": 128, + "type": "integer", + "title": "write thread pool size", + "minimum": 1, + }, + "thread_pool_search_queue_size": map[string]any{ + "description": "Size for the thread pool queue. See documentation for exact details.", + "maximum": 2000, + "type": "integer", + "title": "search thread pool queue size", + "minimum": 10, + }, + "indices_query_bool_max_clause_count": map[string]any{ + "description": "Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.", + "maximum": 4096, + "type": "integer", + "title": "indices.query.bool.max_clause_count", + "minimum": 64, + }, + "thread_pool_search_throttled_queue_size": map[string]any{ + "description": "Size for the thread pool queue. See documentation for exact details.", + "maximum": 2000, + "type": "integer", + "title": "search_throttled thread pool queue size", + "minimum": 10, + }, + "cluster_max_shards_per_node": map[string]any{ + "description": "Controls the number of shards allowed in the cluster per data node", + "maximum": 10000, + "type": "integer", + "title": "cluster.max_shards_per_node", + "minimum": 100, + "example": 1000, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "OpenSearch settings", + }, +} diff --git a/operator/opensearchcontroller/update.go b/operator/opensearchcontroller/update.go index 21afd349..93e68454 100644 --- a/operator/opensearchcontroller/update.go +++ b/operator/opensearchcontroller/update.go @@ -2,10 +2,12 @@ package opensearchcontroller import ( "context" + "encoding/json" "fmt" - "github.com/exoscale/egoscale/v2/oapi" + + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - "github.com/vshn/provider-exoscale/operator/mapper" + controllerruntime "sigs.k8s.io/controller-runtime" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" @@ -20,31 +22,30 @@ func (p *pipeline) Update(ctx context.Context, mg resource.Managed) (managed.Ext openSearchInstance := mg.(*exoscalev1.OpenSearch) forProvider := openSearchInstance.Spec.ForProvider - settings, err := mapper.ToMap(forProvider.OpenSearchSettings) - ipFilter := []string(forProvider.IPFilter) - - if err != nil { - return managed.ExternalUpdate{}, fmt.Errorf("cannot map openSearchInstance settings: %w", err) + settings := exoscalesdk.JSONSchemaOpensearch{} + if len(forProvider.OpenSearchSettings.Raw) != 0 { + err := json.Unmarshal(forProvider.OpenSearchSettings.Raw, &settings) + if err != nil { + return managed.ExternalUpdate{}, 
fmt.Errorf("cannot map opensearchInstance settings: %w", err) + } } + ipFilter := []string(forProvider.IPFilter) - body := oapi.UpdateDbaasServiceOpensearchJSONRequestBody{ - Maintenance: &struct { - Dow oapi.UpdateDbaasServiceOpensearchJSONBodyMaintenanceDow `json:"dow"` - Time string `json:"time"` - }{ - Dow: oapi.UpdateDbaasServiceOpensearchJSONBodyMaintenanceDow(forProvider.Maintenance.DayOfWeek), + body := exoscalesdk.UpdateDBAASServiceOpensearchRequest{ + Maintenance: &exoscalesdk.UpdateDBAASServiceOpensearchRequestMaintenance{ + Dow: exoscalesdk.UpdateDBAASServiceOpensearchRequestMaintenanceDow(forProvider.Maintenance.DayOfWeek), Time: forProvider.Maintenance.TimeOfDay.String()}, - OpensearchSettings: &settings, - Plan: &forProvider.Size.Plan, - IpFilter: &ipFilter, + OpensearchSettings: settings, + Plan: forProvider.Size.Plan, + IPFilter: ipFilter, TerminationProtection: &forProvider.TerminationProtection, } - resp, err := p.exo.UpdateDbaasServiceOpensearchWithResponse(ctx, oapi.DbaasServiceName(openSearchInstance.GetInstanceName()), body) + resp, err := p.exo.UpdateDBAASServiceOpensearch(ctx, openSearchInstance.GetInstanceName(), body) if err != nil { log.V(1).Error(err, "Failed do UPDATE resource, ", "instance name: ", openSearchInstance.GetInstanceName()) return managed.ExternalUpdate{}, err } - log.V(1).Info("response", "body", string(resp.Body)) + log.V(1).Info("response", "message", string(resp.Message)) return managed.ExternalUpdate{}, nil } diff --git a/operator/opensearchcontroller/webhook.go b/operator/opensearchcontroller/webhook.go index 7184ca5f..d8b5006b 100644 --- a/operator/opensearchcontroller/webhook.go +++ b/operator/opensearchcontroller/webhook.go @@ -3,12 +3,14 @@ package opensearchcontroller import ( "context" "fmt" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/go-logr/logr" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/common" "github.com/vshn/provider-exoscale/operator/mapper" "github.com/vshn/provider-exoscale/operator/pipelineutil" "github.com/vshn/provider-exoscale/operator/webhook" @@ -37,7 +39,7 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm return nil, err } - err = v.validateVersion(ctx, obj, *availableVersions) + err = v.validateVersion(ctx, obj, availableVersions) if err != nil { return nil, err } @@ -45,28 +47,27 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm return nil, v.validateSpec(openSearchInstance) } -func (v *Validator) getAvailableVersions(ctx context.Context, obj runtime.Object) (*[]string, error) { +func (v *Validator) getAvailableVersions(ctx context.Context, obj runtime.Object) ([]string, error) { openSearchInstance := obj.(*exoscalev1.OpenSearch) v.log.V(1).Info("get opensearch available versions") - exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, openSearchInstance.GetProviderConfigReference().Name, exoscalesdk.ClientOptWithAPIEndpoint(fmt.Sprintf("https://api-%s.exoscale.com", openSearchInstance.Spec.ForProvider.Zone))) + exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, openSearchInstance.GetProviderConfigReference().Name, exoscalesdk.ClientOptWithEndpoint(common.ZoneTranslation[openSearchInstance.Spec.ForProvider.Zone])) if err != nil { return nil, fmt.Errorf("open exoscale client failed: %w", err) } 
// get opensearch available versions - resp, err := exo.Exoscale.GetDbaasServiceTypeWithResponse(ctx, serviceType) + resp, err := exo.Exoscale.GetDBAASServiceType(ctx, serviceType) if err != nil { return nil, fmt.Errorf("get DBaaS service type failed: %w", err) } - v.log.V(1).Info("DBaaS service type", "body", string(resp.Body)) + v.log.V(1).Info("DBaaS service type", "name", string(resp.Name), "description", string(resp.Description)) - serviceType := *resp.JSON200 - if serviceType.AvailableVersions == nil { + if resp.AvailableVersions == nil { return nil, fmt.Errorf("opensearch available versions not found") } - return serviceType.AvailableVersions, nil + return resp.AvailableVersions, nil } func (v *Validator) validateVersion(_ context.Context, obj runtime.Object, availableVersions []string) error { diff --git a/operator/operator.go b/operator/operator.go index 1fbdb7fc..6a09ab29 100644 --- a/operator/operator.go +++ b/operator/operator.go @@ -4,6 +4,11 @@ import ( "github.com/vshn/provider-exoscale/operator/bucketcontroller" "github.com/vshn/provider-exoscale/operator/configcontroller" "github.com/vshn/provider-exoscale/operator/iamkeycontroller" + "github.com/vshn/provider-exoscale/operator/kafkacontroller" + "github.com/vshn/provider-exoscale/operator/mysqlcontroller" + "github.com/vshn/provider-exoscale/operator/opensearchcontroller" + "github.com/vshn/provider-exoscale/operator/postgresqlcontroller" + "github.com/vshn/provider-exoscale/operator/rediscontroller" ctrl "sigs.k8s.io/controller-runtime" ) @@ -14,11 +19,11 @@ func SetupControllers(mgr ctrl.Manager) error { bucketcontroller.SetupController, configcontroller.SetupController, iamkeycontroller.SetupController, - // mysqlcontroller.SetupController, - // postgresqlcontroller.SetupController, - // rediscontroller.SetupController, - // kafkacontroller.SetupController, - // opensearchcontroller.SetupController, + mysqlcontroller.SetupController, + postgresqlcontroller.SetupController, + rediscontroller.SetupController, + kafkacontroller.SetupController, + opensearchcontroller.SetupController, } { if err := setup(mgr); err != nil { return err @@ -32,11 +37,11 @@ func SetupWebhooks(mgr ctrl.Manager) error { for _, setup := range []func(ctrl.Manager) error{ bucketcontroller.SetupWebhook, iamkeycontroller.SetupWebhook, - // mysqlcontroller.SetupWebhook, - // postgresqlcontroller.SetupWebhook, - // rediscontroller.SetupWebhook, - // kafkacontroller.SetupWebhook, - // opensearchcontroller.SetupWebhook, + mysqlcontroller.SetupWebhook, + postgresqlcontroller.SetupWebhook, + rediscontroller.SetupWebhook, + kafkacontroller.SetupWebhook, + opensearchcontroller.SetupWebhook, } { if err := setup(mgr); err != nil { return err diff --git a/operator/postgresqlcontroller/connector.go b/operator/postgresqlcontroller/connector.go index 8b3a3989..563a4ff4 100644 --- a/operator/postgresqlcontroller/connector.go +++ b/operator/postgresqlcontroller/connector.go @@ -2,12 +2,13 @@ package postgresqlcontroller import ( "context" - "fmt" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" + exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/common" "github.com/vshn/provider-exoscale/operator/pipelineutil" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/client" @@ -23,9 +24,9 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E log := ctrl.LoggerFrom(ctx) log.V(1).Info("Connecting resource") - pgInstance := fromManaged(mg) + pgInstance := mg.(*exoscalev1.PostgreSQL) - exo, err := pipelineutil.OpenExoscaleClient(ctx, c.kube, pgInstance.GetProviderConfigName(), exoscalesdk.ClientOptWithAPIEndpoint(fmt.Sprintf("https://api-%s.exoscale.com", pgInstance.Spec.ForProvider.Zone))) + exo, err := pipelineutil.OpenExoscaleClient(ctx, c.kube, pgInstance.GetProviderConfigName(), exoscalesdk.ClientOptWithEndpoint(common.ZoneTranslation[pgInstance.Spec.ForProvider.Zone])) if err != nil { return nil, err } diff --git a/operator/postgresqlcontroller/create.go b/operator/postgresqlcontroller/create.go index 9c4626e6..5e0ed9b4 100644 --- a/operator/postgresqlcontroller/create.go +++ b/operator/postgresqlcontroller/create.go @@ -2,15 +2,16 @@ package postgresqlcontroller import ( "context" + "encoding/json" "fmt" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/exoscale/egoscale/v2/oapi" controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -19,7 +20,7 @@ func (p *pipeline) Create(ctx context.Context, mg resource.Managed) (managed.Ext log := controllerruntime.LoggerFrom(ctx) log.Info("Creating resource") - pgInstance := fromManaged(mg) + pgInstance := mg.(*exoscalev1.PostgreSQL) spec := pgInstance.Spec.ForProvider @@ -27,43 +28,46 @@ func (p *pipeline) Create(ctx context.Context, mg resource.Managed) (managed.Ext if err != nil { return managed.ExternalCreation{}, errors.Wrap(err, "cannot map spec to API request") } - resp, err := p.exo.CreateDbaasServicePgWithResponse(ctx, oapi.DbaasServiceName(pgInstance.Name), body) + resp, err := p.exo.CreateDBAASServicePG(ctx, pgInstance.Name, body) if err != nil { return managed.ExternalCreation{}, errors.Wrap(err, "cannot create instance") } - log.V(1).Info("Response", "json", resp.JSON200) + log.V(1).Info("Response", "message", resp.Message) return managed.ExternalCreation{}, nil } // fromSpecToCreateBody places the given spec into the request body. -func fromSpecToCreateBody(spec exoscalev1.PostgreSQLParameters) (oapi.CreateDbaasServicePgJSONRequestBody, error) { +func fromSpecToCreateBody(spec exoscalev1.PostgreSQLParameters) (exoscalesdk.CreateDBAASServicePGRequest, error) { /** NOTE: If you change anything below, also update fromSpecToCreateBody(). Unfortunately the generated openapi-types in exoscale are unusable for reusing same properties. 
*/ backupSchedule, err := mapper.ToBackupSchedule(spec.Backup.TimeOfDay) if err != nil { - return oapi.CreateDbaasServicePgJSONRequestBody{}, fmt.Errorf("invalid backup schedule: %w", err) + return exoscalesdk.CreateDBAASServicePGRequest{}, fmt.Errorf("invalid backup schedule: %w", err) } - settings, err := mapper.ToMap(spec.PGSettings) - if err != nil { - return oapi.CreateDbaasServicePgJSONRequestBody{}, fmt.Errorf("invalid pgsettings: %w", err) + settings := exoscalesdk.JSONSchemaPG{} + if len(spec.PGSettings.Raw) != 0 { + err = json.Unmarshal(spec.PGSettings.Raw, &settings) + if err != nil { + return exoscalesdk.CreateDBAASServicePGRequest{}, fmt.Errorf("invalid pgsettings: %w", err) + } } - return oapi.CreateDbaasServicePgJSONRequestBody{ - Plan: spec.Size.Plan, - BackupSchedule: &backupSchedule, - Variant: &variantAiven, - Version: &spec.Version, + return exoscalesdk.CreateDBAASServicePGRequest{ + Plan: spec.Size.Plan, + BackupSchedule: &exoscalesdk.CreateDBAASServicePGRequestBackupSchedule{ + BackupHour: backupSchedule.BackupHour, + BackupMinute: backupSchedule.BackupMinute, + }, + Variant: variantAiven, + Version: exoscalesdk.DBAASPGTargetVersions(spec.Version), TerminationProtection: &spec.TerminationProtection, - Maintenance: &struct { - Dow oapi.CreateDbaasServicePgJSONBodyMaintenanceDow `json:"dow"` - Time string `json:"time"` - }{ - Dow: oapi.CreateDbaasServicePgJSONBodyMaintenanceDow(spec.Maintenance.DayOfWeek), + Maintenance: &exoscalesdk.CreateDBAASServicePGRequestMaintenance{ + Dow: exoscalesdk.CreateDBAASServicePGRequestMaintenanceDow(spec.Maintenance.DayOfWeek), Time: spec.Maintenance.TimeOfDay.String(), }, - IpFilter: mapper.ToSlicePtr(spec.IPFilter), - PgSettings: &settings, + IPFilter: spec.IPFilter, + PGSettings: settings, }, nil } diff --git a/operator/postgresqlcontroller/delete.go b/operator/postgresqlcontroller/delete.go index 8f81d21c..3eef96bd 100644 --- a/operator/postgresqlcontroller/delete.go +++ b/operator/postgresqlcontroller/delete.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/crossplane/crossplane-runtime/pkg/resource" + exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -13,11 +14,11 @@ func (p *pipeline) Delete(ctx context.Context, mg resource.Managed) error { log := controllerruntime.LoggerFrom(ctx) log.Info("Deleting resource") - pgInstance := fromManaged(mg) - resp, err := p.exo.DeleteDbaasServiceWithResponse(ctx, pgInstance.GetInstanceName()) + pgInstance := mg.(*exoscalev1.PostgreSQL) + resp, err := p.exo.DeleteDBAASServicePG(ctx, pgInstance.GetInstanceName()) if err != nil { return fmt.Errorf("cannot delete instance: %w", err) } - log.V(1).Info("Response when deleting", "json", resp.JSON200) + log.V(1).Info("Response when deleting", "message", resp.Message) return nil } diff --git a/operator/postgresqlcontroller/observe.go b/operator/postgresqlcontroller/observe.go index 8f37002f..a9b35d67 100644 --- a/operator/postgresqlcontroller/observe.go +++ b/operator/postgresqlcontroller/observe.go @@ -2,15 +2,19 @@ package postgresqlcontroller import ( "context" + "encoding/json" "fmt" - "k8s.io/utils/ptr" "net/url" "strings" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" + "github.com/go-logr/logr" exoscalev1 
"github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" @@ -22,72 +26,77 @@ func (p *pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ex log := controllerruntime.LoggerFrom(ctx) log.V(1).Info("Observing resource") - pgInstance := fromManaged(mg) + pgInstance := mg.(*exoscalev1.PostgreSQL) - resp, err := p.exo.GetDbaasServicePgWithResponse(ctx, oapi.DbaasServiceName(pgInstance.GetInstanceName())) + pg, err := p.exo.GetDBAASServicePG(ctx, pgInstance.GetInstanceName()) if err != nil { - return managed.ExternalObservation{}, ignoreNotFound(err) + if errors.Is(err, exoscalesdk.ErrNotFound) { + return managed.ExternalObservation{ResourceExists: false}, nil + } + return managed.ExternalObservation{}, fmt.Errorf("cannot observe pgInstance: %w", err) } - pgExo := *resp.JSON200 - log.V(2).Info("Response", "raw", resp.JSON200) - log.V(1).Info("Retrieved instance", "state", pgExo.State) - pgInstance.Status.AtProvider, err = mapObservation(pgExo) + log.V(1).Info("Retrieved instance", "state", pg.State) + + pgInstance.Status.AtProvider, err = mapObservation(pg) if err != nil { return managed.ExternalObservation{}, errors.Wrap(err, "cannot parse instance status") } - setConditionFromState(pgExo, pgInstance) + setConditionFromState(*pg, pgInstance) - ca, err := p.exo.GetDatabaseCACertificate(ctx, pgInstance.Spec.ForProvider.Zone.String()) + caCert, err := p.exo.GetDBAASCACertificate(ctx) if err != nil { return managed.ExternalObservation{}, errors.Wrap(err, "cannot retrieve CA certificate") } - pp, err := mapParameters(pgExo, pgInstance.Spec.ForProvider.Zone) + params, err := mapParameters(pg, pgInstance.Spec.ForProvider.Zone) if err != nil { return managed.ExternalObservation{}, err } - connDetails, err := connectionDetails(pgExo, ca) + connDetails, err := connectionDetails(ctx, pg, caCert.Certificate, p.exo) if err != nil { return managed.ExternalObservation{}, errors.Wrap(err, "cannot read connection details") } - currentParams, err := setSettingsDefaults(ctx, p.exo, &pgInstance.Spec.ForProvider) + currentParams, err := setSettingsDefaults(ctx, *p.exo, &pgInstance.Spec.ForProvider) if err != nil { log.Error(err, "unable to set postgres settings schema") currentParams = &pgInstance.Spec.ForProvider } return managed.ExternalObservation{ ResourceExists: true, - ResourceUpToDate: isUpToDate(currentParams, pp, log), + ResourceUpToDate: isUpToDate(currentParams, params, log), ConnectionDetails: connDetails, }, nil } -// mapParameters converts a oapi.DbaasServicePg to the internal exoscalev1.PostgreSQLParameters type. -func mapParameters(in oapi.DbaasServicePg, zone exoscalev1.Zone) (*exoscalev1.PostgreSQLParameters, error) { - settings, err := mapper.ToRawExtension(in.PgSettings) +// mapParameters converts a exoscalesdk.DBAASServicePG to the internal exoscalev1.PostgreSQLParameters type. 
+func mapParameters(in *exoscalesdk.DBAASServicePG, zone exoscalev1.Zone) (*exoscalev1.PostgreSQLParameters, error) { + + jsonSettings, err := json.Marshal(in.PGSettings) if err != nil { - return nil, fmt.Errorf("unable to parse settings: %w", err) + return nil, fmt.Errorf("cannot parse pgInstance settings: %w", err) } + settings := runtime.RawExtension{Raw: jsonSettings} + return &exoscalev1.PostgreSQLParameters{ Maintenance: exoscalev1.MaintenanceSpec{ DayOfWeek: in.Maintenance.Dow, TimeOfDay: exoscalev1.TimeOfDay(in.Maintenance.Time), }, - Backup: mapper.ToBackupSpec(in.BackupSchedule), + Backup: toBackupSpec(in.BackupSchedule), Zone: zone, DBaaSParameters: exoscalev1.DBaaSParameters{ TerminationProtection: ptr.Deref(in.TerminationProtection, false), Size: exoscalev1.SizeSpec{ Plan: in.Plan, }, - IPFilter: *in.IpFilter, + IPFilter: in.IPFilter, }, - Version: ptr.Deref(in.Version, ""), + Version: in.Version, PGSettings: settings, }, nil } @@ -100,44 +109,47 @@ func ignoreNotFound(err error) error { return errors.Wrap(err, "cannot observe instance") } -func setConditionFromState(pgExo oapi.DbaasServicePg, pgInstance *exoscalev1.PostgreSQL) { - switch *pgExo.State { - case oapi.EnumServiceStateRunning: +func setConditionFromState(pgExo exoscalesdk.DBAASServicePG, pgInstance *exoscalev1.PostgreSQL) { + switch pgExo.State { + case exoscalesdk.EnumServiceStateRunning: pgInstance.SetConditions(exoscalev1.Running()) - case oapi.EnumServiceStateRebuilding: + case exoscalesdk.EnumServiceStateRebuilding: pgInstance.SetConditions(exoscalev1.Rebuilding()) - case oapi.EnumServiceStatePoweroff: + case exoscalesdk.EnumServiceStatePoweroff: pgInstance.SetConditions(exoscalev1.PoweredOff()) - case oapi.EnumServiceStateRebalancing: + case exoscalesdk.EnumServiceStateRebalancing: pgInstance.SetConditions(exoscalev1.Rebalancing()) } } -var variantAiven = oapi.EnumPgVariantAiven +var variantAiven = exoscalesdk.EnumPGVariantAiven // mapObservation fills the status fields from the given response body. 
-func mapObservation(pg oapi.DbaasServicePg) (exoscalev1.PostgreSQLObservation, error) { +func mapObservation(instance *exoscalesdk.DBAASServicePG) (exoscalev1.PostgreSQLObservation, error) { + jsonSettings, err := json.Marshal(instance.PGSettings) + if err != nil { + return exoscalev1.PostgreSQLObservation{}, fmt.Errorf("error parsing PgSettings") + } + + settings := runtime.RawExtension{Raw: jsonSettings} + observation := exoscalev1.PostgreSQLObservation{ DBaaSParameters: exoscalev1.DBaaSParameters{ - TerminationProtection: ptr.Deref(pg.TerminationProtection, false), + TerminationProtection: ptr.Deref(instance.TerminationProtection, false), Size: exoscalev1.SizeSpec{ - Plan: pg.Plan, + Plan: instance.Plan, }, - IPFilter: *pg.IpFilter, + IPFilter: instance.IPFilter, }, - Version: ptr.Deref(pg.Version, ""), + Version: instance.Version, Maintenance: exoscalev1.MaintenanceSpec{ - DayOfWeek: pg.Maintenance.Dow, - TimeOfDay: exoscalev1.TimeOfDay(pg.Maintenance.Time), + DayOfWeek: instance.Maintenance.Dow, + TimeOfDay: exoscalev1.TimeOfDay(instance.Maintenance.Time), }, - Backup: mapper.ToBackupSpec(pg.BackupSchedule), - NodeStates: mapper.ToNodeStates(pg.NodeStates), + Backup: toBackupSpec(instance.BackupSchedule), + NodeStates: mapper.ToNodeStates(instance.NodeStates), } - settings, err := mapper.ToRawExtension(pg.PgSettings) - if err != nil { - return observation, errors.Wrap(err, "cannot marshal json") - } observation.PGSettings = settings return observation, nil @@ -173,20 +185,38 @@ func isUpToDate(current, external *exoscalev1.PostgreSQLParameters, log logr.Log } // connectionDetails parses the connection details from the given observation. -func connectionDetails(pgExo oapi.DbaasServicePg, ca string) (managed.ConnectionDetails, error) { - raw := ptr.Deref(pgExo.Uri, "") - parsed, err := url.Parse(raw) +func connectionDetails(ctx context.Context, in *exoscalesdk.DBAASServicePG, ca string, client *exoscalesdk.Client) (managed.ConnectionDetails, error) { + uri := in.URI + // uri may be absent + if uri == "" { + if in.ConnectionInfo == nil || in.ConnectionInfo.URI == nil || len(in.ConnectionInfo.URI) == 0 { + return map[string][]byte{}, nil + } + uri = in.ConnectionInfo.URI[0] + } + parsed, err := url.Parse(uri) if err != nil { return nil, fmt.Errorf("cannot parse connection URL: %w", err) } - password, _ := parsed.User.Password() + password, err := client.RevealDBAASPostgresUserPassword(ctx, string(in.Name), parsed.User.Username()) + if err != nil { + return nil, fmt.Errorf("cannot reveal password for PostgreSQL instance: %w", err) + } return map[string][]byte{ "POSTGRESQL_USER": []byte(parsed.User.Username()), - "POSTGRESQL_PASSWORD": []byte(password), - "POSTGRESQL_URL": []byte(raw), + "POSTGRESQL_PASSWORD": []byte(password.Password), + "POSTGRESQL_URL": []byte(uri), "POSTGRESQL_DB": []byte(strings.TrimPrefix(parsed.Path, "/")), "POSTGRESQL_HOST": []byte(parsed.Hostname()), "POSTGRESQL_PORT": []byte(parsed.Port()), "ca.crt": []byte(ca), }, nil } + +func toBackupSpec(schedule *exoscalesdk.DBAASServicePGBackupSchedule) exoscalev1.BackupSpec { + if schedule == nil { + return exoscalev1.BackupSpec{} + } + hour, min := schedule.BackupHour, schedule.BackupMinute + return exoscalev1.BackupSpec{TimeOfDay: exoscalev1.TimeOfDay(fmt.Sprintf("%02d:%02d:00", hour, min))} +} diff --git a/operator/postgresqlcontroller/observe_test.go b/operator/postgresqlcontroller/observe_test.go index 8954e26f..3127e0ac 100644 --- a/operator/postgresqlcontroller/observe_test.go +++ 
b/operator/postgresqlcontroller/observe_test.go @@ -1,9 +1,10 @@ package postgresqlcontroller import ( + "context" "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" ) @@ -27,10 +28,13 @@ func Test_connectionDetails(t *testing.T) { expectedDatabase: "defaultdb", }, } + ctx := context.TODO() + client := exoscalesdk.Client{} + for name, tc := range tests { t.Run(name, func(t *testing.T) { - exo := oapi.DbaasServicePg{Uri: &tc.givenUri} - secrets, err := connectionDetails(exo, "somebase64string") + exo := exoscalesdk.DBAASServicePG{URI: tc.givenUri} + secrets, err := connectionDetails(ctx, &exo, "somebase64string", &client) assert.NoError(t, err) assert.Equal(t, tc.expectedUser, string(secrets["POSTGRESQL_USER"]), "username") assert.Equal(t, tc.expectedPassword, string(secrets["POSTGRESQL_PASSWORD"]), "password") diff --git a/operator/postgresqlcontroller/pipeline.go b/operator/postgresqlcontroller/pipeline.go index df2c3cb1..9685c2d2 100644 --- a/operator/postgresqlcontroller/pipeline.go +++ b/operator/postgresqlcontroller/pipeline.go @@ -2,9 +2,7 @@ package postgresqlcontroller import ( "github.com/crossplane/crossplane-runtime/pkg/event" - "github.com/crossplane/crossplane-runtime/pkg/resource" - exoscalesdk "github.com/exoscale/egoscale/v2" - exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + exoscalesdk "github.com/exoscale/egoscale/v3" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -23,7 +21,3 @@ func newPipeline(client client.Client, recorder event.Recorder, exoscaleClient * exo: exoscaleClient, } } - -func fromManaged(mg resource.Managed) *exoscalev1.PostgreSQL { - return mg.(*exoscalev1.PostgreSQL) -} diff --git a/operator/postgresqlcontroller/settings.go b/operator/postgresqlcontroller/settings.go index fb65d9e3..77e4155a 100644 --- a/operator/postgresqlcontroller/settings.go +++ b/operator/postgresqlcontroller/settings.go @@ -2,15 +2,16 @@ package postgresqlcontroller import ( "context" + "encoding/json" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - "github.com/exoscale/egoscale/v2/oapi" "github.com/vshn/provider-exoscale/internal/settings" ) type settingsFetcher interface { - GetDbaasSettingsPgWithResponse(ctx context.Context, reqEditors ...oapi.RequestEditorFn) (*oapi.GetDbaasSettingsPgResponse, error) + GetDBAASSettingsPG(ctx context.Context) (*exoscalesdk.GetDBAASSettingsPGResponse, error) } func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1.PostgreSQLParameters) (*exoscalev1.PostgreSQLParameters, error) { @@ -29,11 +30,15 @@ func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1. 
} func fetchSettingSchema(ctx context.Context, f settingsFetcher) (settings.Schemas, error) { - resp, err := f.GetDbaasSettingsPgWithResponse(ctx) + resp, err := f.GetDBAASSettingsPG(ctx) if err != nil { return nil, err } - schemas, err := settings.ParseSchemas(resp.Body) + settingsJson, err := json.Marshal(resp) + if err != nil { + return nil, err + } + schemas, err := settings.ParseSchemas(settingsJson) if err != nil { return nil, err } diff --git a/operator/postgresqlcontroller/settings_test.go b/operator/postgresqlcontroller/settings_test.go index 7cc969a1..92986a5c 100644 --- a/operator/postgresqlcontroller/settings_test.go +++ b/operator/postgresqlcontroller/settings_test.go @@ -4,19 +4,21 @@ import ( "context" "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/mapper" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" ) type fakeSettingsFetcher struct{} -func (fakeSettingsFetcher) GetDbaasSettingsPgWithResponse(ctx context.Context, reqEditors ...oapi.RequestEditorFn) (*oapi.GetDbaasSettingsPgResponse, error) { - return &oapi.GetDbaasSettingsPgResponse{ - Body: rawResponse, +func (fakeSettingsFetcher) GetDBAASSettingsPG(ctx context.Context) (*exoscalesdk.GetDBAASSettingsPGResponse, error) { + return &exoscalesdk.GetDBAASSettingsPGResponse{ + Settings: &pgSettings, }, nil } @@ -49,4 +51,510 @@ func TestDefaultSettings(t *testing.T) { assert.Len(t, setingsWithDefaults, 1) } -var rawResponse = []byte(`{"settings":{"pg":{"properties":{"track_activity_query_size":{"description":"Specifies the number of bytes reserved to track the currently executing command for each active session.","maximum":10240,"type":"integer","title":"track_activity_query_size","minimum":1024,"example":1024},"log_autovacuum_min_duration":{"description":"Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.","maximum":2147483647,"type":"integer","title":"log_autovacuum_min_duration","minimum":-1},"autovacuum_vacuum_cost_limit":{"description":"Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.","maximum":10000,"type":"integer","title":"autovacuum_vacuum_cost_limit","minimum":-1},"timezone":{"description":"PostgreSQL service timezone","type":"string","title":"timezone","maxLength":64,"example":"Europe/Helsinki"},"track_io_timing":{"description":"Enables timing of database I/O calls. 
This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.","enum":["off","on"],"type":"string","title":"track_io_timing","example":"off"},"pg_stat_monitor.pgsm_enable_query_plan":{"description":"Enables or disables query plan monitoring","type":"boolean","title":"pg_stat_monitor.pgsm_enable_query_plan","example":false},"max_files_per_process":{"description":"PostgreSQL maximum number of files that can be open per process","maximum":4096,"type":"integer","title":"max_files_per_process","minimum":1000},"pg_stat_monitor.pgsm_max_buckets":{"description":"Sets the maximum number of buckets ","maximum":10,"type":"integer","title":"pg_stat_monitor.pgsm_max_buckets","minimum":1,"example":10},"bgwriter_delay":{"description":"Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.","maximum":10000,"type":"integer","title":"bgwriter_delay","minimum":10,"example":200},"autovacuum_max_workers":{"description":"Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.","maximum":20,"type":"integer","title":"autovacuum_max_workers","minimum":1},"bgwriter_flush_after":{"description":"Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.","maximum":2048,"type":"integer","title":"bgwriter_flush_after","minimum":0,"example":512},"default_toast_compression":{"description":"Specifies the default TOAST compression method for values of compressible columns (the default is lz4).","enum":["lz4","pglz"],"type":"string","title":"default_toast_compression","example":"lz4"},"deadlock_timeout":{"description":"This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.","maximum":1800000,"type":"integer","title":"deadlock_timeout","minimum":500,"example":1000},"idle_in_transaction_session_timeout":{"description":"Time out sessions with open transactions after this number of milliseconds","maximum":604800000,"type":"integer","title":"idle_in_transaction_session_timeout","minimum":0},"max_pred_locks_per_transaction":{"description":"PostgreSQL maximum predicate locks per transaction","maximum":5120,"type":"integer","title":"max_pred_locks_per_transaction","minimum":64},"max_replication_slots":{"description":"PostgreSQL maximum replication slots","maximum":64,"type":"integer","title":"max_replication_slots","minimum":8},"autovacuum_vacuum_threshold":{"description":"Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples","maximum":2147483647,"type":"integer","title":"autovacuum_vacuum_threshold","minimum":0},"max_parallel_workers_per_gather":{"description":"Sets the maximum number of workers that can be started by a single Gather or Gather Merge node","maximum":96,"type":"integer","title":"max_parallel_workers_per_gather","minimum":0},"bgwriter_lru_multiplier":{"description":"The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 
1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.","maximum":10,"type":"number","title":"bgwriter_lru_multiplier","minimum":0,"example":2.0},"pg_partman_bgw.interval":{"description":"Sets the time interval to run pg_partman's scheduled tasks","maximum":604800,"type":"integer","title":"pg_partman_bgw.interval","minimum":3600,"example":3600},"autovacuum_naptime":{"description":"Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute","maximum":86400,"type":"integer","title":"autovacuum_naptime","minimum":1},"log_line_prefix":{"description":"Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.","enum":["'pid=%p,user=%u,db=%d,app=%a,client=%h '","'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '","'%m [%p] %q[user=%u,db=%d,app=%a] '"],"type":"string","title":"log_line_prefix"},"log_temp_files":{"description":"Log statements for each temporary file created larger than this number of kilobytes, -1 disables","maximum":2147483647,"type":"integer","title":"log_temp_files","minimum":-1},"max_locks_per_transaction":{"description":"PostgreSQL maximum locks per transaction","maximum":6400,"type":"integer","title":"max_locks_per_transaction","minimum":64},"autovacuum_vacuum_scale_factor":{"description":"Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)","maximum":1.0,"type":"number","title":"autovacuum_vacuum_scale_factor","minimum":0.0},"wal_writer_delay":{"description":"WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance","maximum":200,"type":"integer","title":"wal_writer_delay","minimum":10,"example":50},"track_commit_timestamp":{"description":"Record commit time of transactions.","enum":["off","on"],"type":"string","title":"track_commit_timestamp","example":"off"},"track_functions":{"description":"Enables tracking of function call counts and time used.","enum":["all","pl","none"],"type":"string","title":"track_functions"},"wal_sender_timeout":{"description":"Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.","anyOf":[{"maximum":0,"minimum":0},{"maximum":10800000,"minimum":5000}],"type":"integer","user_error":"Must be either 0 or between 5000 and 10800000.","title":"wal_sender_timeout","example":60000},"autovacuum_vacuum_cost_delay":{"description":"Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. 
The default value is 20 milliseconds","maximum":100,"type":"integer","title":"autovacuum_vacuum_cost_delay","minimum":-1},"max_stack_depth":{"description":"Maximum depth of the stack in bytes","maximum":6291456,"type":"integer","title":"max_stack_depth","minimum":2097152},"max_parallel_workers":{"description":"Sets the maximum number of workers that the system can support for parallel queries","maximum":96,"type":"integer","title":"max_parallel_workers","minimum":0},"pg_partman_bgw.role":{"description":"Controls which role to use for pg_partman's scheduled background tasks.","type":"string","user_error":"Must consist of alpha-numeric characters, dots, underscores or dashes, may not start with dash or dot, max 64 characters","title":"pg_partman_bgw.role","maxLength":64,"example":"myrolename","pattern":"^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$"},"max_wal_senders":{"description":"PostgreSQL maximum WAL senders","maximum":64,"type":"integer","title":"max_wal_senders","minimum":20},"max_logical_replication_workers":{"description":"PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)","maximum":64,"type":"integer","title":"max_logical_replication_workers","minimum":4},"autovacuum_analyze_scale_factor":{"description":"Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)","maximum":1.0,"type":"number","title":"autovacuum_analyze_scale_factor","minimum":0.0},"max_prepared_transactions":{"description":"PostgreSQL maximum prepared transactions","maximum":10000,"type":"integer","title":"max_prepared_transactions","minimum":0},"autovacuum_analyze_threshold":{"description":"Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.","maximum":2147483647,"type":"integer","title":"autovacuum_analyze_threshold","minimum":0},"max_worker_processes":{"description":"Sets the maximum number of background processes that the system can support","maximum":96,"type":"integer","title":"max_worker_processes","minimum":8},"pg_stat_statements.track":{"description":"Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.","enum":["all","top","none"],"type":["string"],"title":"pg_stat_statements.track"},"temp_file_limit":{"description":"PostgreSQL temporary file limit in KiB, -1 for unlimited","maximum":2147483647,"type":"integer","title":"temp_file_limit","minimum":-1,"example":5000000},"bgwriter_lru_maxpages":{"description":"In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.","maximum":1073741823,"type":"integer","title":"bgwriter_lru_maxpages","minimum":0,"example":100},"log_error_verbosity":{"description":"Controls the amount of detail written in the server log for each message that is logged.","enum":["TERSE","DEFAULT","VERBOSE"],"type":"string","title":"log_error_verbosity"},"autovacuum_freeze_max_age":{"description":"Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. 
Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.","maximum":1500000000,"type":"integer","title":"autovacuum_freeze_max_age","minimum":200000000,"example":200000000},"log_min_duration_statement":{"description":"Log statements that take more than this number of milliseconds to run, -1 disables","maximum":86400000,"type":"integer","title":"log_min_duration_statement","minimum":-1},"max_standby_streaming_delay":{"description":"Max standby streaming delay in milliseconds","maximum":43200000,"type":"integer","title":"max_standby_streaming_delay","minimum":1},"jit":{"description":"Controls system-wide use of Just-in-Time Compilation (JIT).","type":"boolean","title":"jit","example":true},"max_standby_archive_delay":{"description":"Max standby archive delay in milliseconds","maximum":43200000,"type":"integer","title":"max_standby_archive_delay","minimum":1},"max_slot_wal_keep_size":{"description":"PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). wal_keep_size minimum WAL size setting takes precedence over this.","maximum":2147483647,"type":"integer","title":"max_slot_wal_keep_size","minimum":-1}},"additionalProperties":false,"type":"object","title":"postgresql.conf configuration values"},"pglookout":{"properties":{"max_failover_replication_time_lag":{"description":"Number of seconds of master unavailability before triggering database failover to standby","default":60,"maximum":9223372036854775807,"type":"integer","title":"max_failover_replication_time_lag","minimum":10}},"additionalProperties":false,"default":{"max_failover_replication_time_lag":60},"type":"object","title":"PGLookout settings"},"pgbouncer":{"properties":{"min_pool_size":{"maximum":10000,"type":"integer","title":"Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size.","minimum":0,"example":0},"ignore_startup_parameters":{"type":"array","title":"List of parameters to ignore when given in startup packet","example":["extra_float_digits","search_path"],"items":{"enum":["extra_float_digits","search_path"],"type":"string","title":"Enum of parameters to ignore when given in startup packet"},"maxItems":32},"server_lifetime":{"maximum":86400,"type":"integer","title":"The pooler will close an unused server connection that has been connected longer than this. [seconds]","minimum":60,"example":3600},"autodb_pool_mode":{"enum":["session","transaction","statement"],"type":"string","title":"PGBouncer pool mode","example":"session"},"server_idle_timeout":{"maximum":86400,"type":"integer","title":"If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. [seconds]","minimum":0,"example":600},"autodb_max_db_connections":{"maximum":2147483647,"type":"integer","title":"Do not allow more than this many server connections per database (regardless of user). 
Setting it to 0 means unlimited.","minimum":0,"example":0},"server_reset_query_always":{"type":"boolean","title":"Run server_reset_query (DISCARD ALL) in all pooling modes","example":false},"autodb_pool_size":{"maximum":10000,"type":"integer","title":"If non-zero then create automatically a pool of that size per user when a pool doesn't exist.","minimum":0,"example":0},"autodb_idle_timeout":{"maximum":86400,"type":"integer","title":"If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. [seconds]","minimum":0,"example":3600}},"additionalProperties":false,"type":"object","title":"PGBouncer connection pooling settings"},"timescaledb":{"properties":{"max_background_workers":{"description":"The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time.","maximum":4096,"type":"integer","title":"timescaledb.max_background_workers","minimum":1,"example":8}},"additionalProperties":false,"type":"object","title":"TimescaleDB extension configuration values"}}}`) +var pgSettings = exoscalesdk.GetDBAASSettingsPGResponseSettings{ + PG: &exoscalesdk.GetDBAASSettingsPGResponseSettingsPG{ + Properties: map[string]any{ + "track_activity_query_size": map[string]any{ + "description": "Specifies the number of bytes reserved to track the currently executing command for each active session.", + "maximum": 10240, + "type": "integer", + "title": "track_activity_query_size", + "minimum": 1024, + "example": 1024, + }, + "log_autovacuum_min_duration": map[string]any{ + "description": "Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.", + "maximum": 2147483647, + "type": "integer", + "title": "log_autovacuum_min_duration", + "minimum": -1, + }, + "autovacuum_vacuum_cost_limit": map[string]any{ + "description": "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + "maximum": 10000, + "type": "integer", + "title": "autovacuum_vacuum_cost_limit", + "minimum": -1, + }, + "timezone": map[string]any{ + "description": "PostgreSQL service timezone", + "type": "string", + "title": "timezone", + "maxLength": 64, + "example": "Europe/Helsinki", + }, + "track_io_timing": map[string]any{ + "description": "Enables timing of database I/O calls. 
This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + "enum": []string{ + "off", + "on", + }, + "type": "string", + "title": "track_io_timing", + "example": "off", + }, + "pg_stat_monitor.pgsm_enable_query_plan": map[string]any{ + "description": "Enables or disables query plan monitoring", + "type": "boolean", + "title": "pg_stat_monitor.pgsm_enable_query_plan", + "example": false, + }, + "max_files_per_process": map[string]any{ + "description": "PostgreSQL maximum number of files that can be open per process", + "maximum": 4096, + "type": "integer", + "title": "max_files_per_process", + "minimum": 1000, + }, + "pg_stat_monitor.pgsm_max_buckets": map[string]any{ + "description": "Sets the maximum number of buckets ", + "maximum": 10, + "type": "integer", + "title": "pg_stat_monitor.pgsm_max_buckets", + "minimum": 1, + "example": 10, + }, + "bgwriter_delay": map[string]any{ + "description": "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", + "maximum": 10000, + "type": "integer", + "title": "bgwriter_delay", + "minimum": 10, + "example": 200, + }, + "autovacuum_max_workers": map[string]any{ + "description": "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", + "maximum": 20, + "type": "integer", + "title": "autovacuum_max_workers", + "minimum": 1, + }, + "bgwriter_flush_after": map[string]any{ + "description": "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + "maximum": 2048, + "type": "integer", + "title": "bgwriter_flush_after", + "minimum": 0, + "example": 512, + }, + "default_toast_compression": map[string]any{ + "description": "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + "enum": []string{ + "lz4", + "pglz", + }, + "type": "string", + "title": "default_toast_compression", + "example": "lz4", + }, + "deadlock_timeout": map[string]any{ + "description": "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + "maximum": 1800000, + "type": "integer", + "title": "deadlock_timeout", + "minimum": 500, + "example": 1000, + }, + "idle_in_transaction_session_timeout": map[string]any{ + "description": "Time out sessions with open transactions after this number of milliseconds", + "maximum": 604800000, + "type": "integer", + "title": "idle_in_transaction_session_timeout", + "minimum": 0, + }, + "max_pred_locks_per_transaction": map[string]any{ + "description": "PostgreSQL maximum predicate locks per transaction", + "maximum": 5120, + "type": "integer", + "title": "max_pred_locks_per_transaction", + "minimum": 64, + }, + "max_replication_slots": map[string]any{ + "description": "PostgreSQL maximum replication slots", + "maximum": 64, + "type": "integer", + "title": "max_replication_slots", + "minimum": 8, + }, + "autovacuum_vacuum_threshold": map[string]any{ + "description": "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. 
The default is 50 tuples", + "maximum": 2147483647, + "type": "integer", + "title": "autovacuum_vacuum_threshold", + "minimum": 0, + }, + "max_parallel_workers_per_gather": map[string]any{ + "description": "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node", + "maximum": 96, + "type": "integer", + "title": "max_parallel_workers_per_gather", + "minimum": 0, + }, + "bgwriter_lru_multiplier": map[string]any{ + "description": "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", + "maximum": 10, + "type": "number", + "title": "bgwriter_lru_multiplier", + "minimum": 0, + "example": 2.0, + }, + "pg_partman_bgw.interval": map[string]any{ + "description": "Sets the time interval to run pg_partman's scheduled tasks", + "maximum": 604800, + "type": "integer", + "title": "pg_partman_bgw.interval", + "minimum": 3600, + "example": 3600, + }, + "autovacuum_naptime": map[string]any{ + "description": "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute", + "maximum": 86400, + "type": "integer", + "title": "autovacuum_naptime", + "minimum": 1, + }, + "log_line_prefix": map[string]any{ + "description": "Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.", + "enum": []string{ + "'pid=%p,user=%u,db=%d,app=%a,client=%h '", + "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", + "'%m [%p] %q[user=%u,db=%d,app=%a] '", + }, + "type": "string", + "title": "log_line_prefix", + }, + "log_temp_files": map[string]any{ + "description": "Log statements for each temporary file created larger than this number of kilobytes, -1 disables", + "maximum": 2147483647, + "type": "integer", + "title": "log_temp_files", + "minimum": -1, + }, + "max_locks_per_transaction": map[string]any{ + "description": "PostgreSQL maximum locks per transaction", + "maximum": 6400, + "type": "integer", + "title": "max_locks_per_transaction", + "minimum": 64, + }, + "autovacuum_vacuum_scale_factor": map[string]any{ + "description": "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "type": "number", + "title": "autovacuum_vacuum_scale_factor", + "minimum": 0.0, + }, + "wal_writer_delay": map[string]any{ + "description": "WAL flush interval in milliseconds. 
Note that setting this value to lower than the default 200ms may negatively impact performance", + "maximum": 200, + "type": "integer", + "title": "wal_writer_delay", + "minimum": 10, + "example": 50, + }, + "track_commit_timestamp": map[string]any{ + "description": "Record commit time of transactions.", + "enum": []string{ + "off", + "on", + }, + "type": "string", + "title": "track_commit_timestamp", + "example": "off", + }, + "track_functions": map[string]any{ + "description": "Enables tracking of function call counts and time used.", + "enum": []string{ + "all", + "pl", + "none", + }, + "type": "string", + "title": "track_functions", + }, + "wal_sender_timeout": map[string]any{ + "description": "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", + "anyOf": []map[string]int{ + { + "maximum": 0, + "minimum": 0, + }, + { + "maximum": 10800000, + "minimum": 5000, + }, + }, + "type": "integer", + "user_error": "Must be either 0 or between 5000 and 10800000.", + "title": "wal_sender_timeout", + "example": 60000, + }, + "autovacuum_vacuum_cost_delay": map[string]any{ + "description": "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds", + "maximum": 100, + "type": "integer", + "title": "autovacuum_vacuum_cost_delay", + "minimum": -1, + }, + "max_stack_depth": map[string]any{ + "description": "Maximum depth of the stack in bytes", + "maximum": 6291456, + "type": "integer", + "title": "max_stack_depth", + "minimum": 2097152, + }, + "max_parallel_workers": map[string]any{ + "description": "Sets the maximum number of workers that the system can support for parallel queries", + "maximum": 96, + "type": "integer", + "title": "max_parallel_workers", + "minimum": 0, + }, + "pg_partman_bgw.role": map[string]any{ + "description": "Controls which role to use for pg_partman's scheduled background tasks.", + "type": "string", + "user_error": "Must consist of alpha-numeric characters, dots, underscores or dashes, may not start with dash or dot, max 64 characters", + "title": "pg_partman_bgw.role", + "maxLength": 64, + "example": "myrolename", + "pattern": "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", + }, + "max_wal_senders": map[string]any{ + "description": "PostgreSQL maximum WAL senders", + "maximum": 64, + "type": "integer", + "title": "max_wal_senders", + "minimum": 20, + }, + "max_logical_replication_workers": map[string]any{ + "description": "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)", + "maximum": 64, + "type": "integer", + "title": "max_logical_replication_workers", + "minimum": 4, + }, + "autovacuum_analyze_scale_factor": map[string]any{ + "description": "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "type": "number", + "title": "autovacuum_analyze_scale_factor", + "minimum": 0.0, + }, + "max_prepared_transactions": map[string]any{ + "description": "PostgreSQL maximum prepared transactions", + "maximum": 10000, + "type": "integer", + "title": "max_prepared_transactions", + "minimum": 0, + }, + "autovacuum_analyze_threshold": map[string]any{ + "description": "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. 
The default is 50 tuples.", + "maximum": 2147483647, + "type": "integer", + "title": "autovacuum_analyze_threshold", + "minimum": 0, + }, + "max_worker_processes": map[string]any{ + "description": "Sets the maximum number of background processes that the system can support", + "maximum": 96, + "type": "integer", + "title": "max_worker_processes", + "minimum": 8, + }, + "pg_stat_statements.track": map[string]any{ + "description": "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", + "enum": []string{ + "all", + "top", + "none", + }, + "type": []string{ + "string", + }, + "title": "pg_stat_statements.track", + }, + "temp_file_limit": map[string]any{ + "description": "PostgreSQL temporary file limit in KiB, -1 for unlimited", + "maximum": 2147483647, + "type": "integer", + "title": "temp_file_limit", + "minimum": -1, + "example": 5000000, + }, + "bgwriter_lru_maxpages": map[string]any{ + "description": "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.", + "maximum": 1073741823, + "type": "integer", + "title": "bgwriter_lru_maxpages", + "minimum": 0, + "example": 100, + }, + "log_error_verbosity": map[string]any{ + "description": "Controls the amount of detail written in the server log for each message that is logged.", + "enum": []string{ + "TERSE", + "DEFAULT", + "VERBOSE", + }, + "type": "string", + "title": "log_error_verbosity", + }, + "autovacuum_freeze_max_age": map[string]any{ + "description": "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.", + "maximum": 1500000000, + "type": "integer", + "title": "autovacuum_freeze_max_age", + "minimum": 200000000, + "example": 200000000, + }, + "log_min_duration_statement": map[string]any{ + "description": "Log statements that take more than this number of milliseconds to run, -1 disables", + "maximum": 86400000, + "type": "integer", + "title": "log_min_duration_statement", + "minimum": -1, + }, + "max_standby_streaming_delay": map[string]any{ + "description": "Max standby streaming delay in milliseconds", + "maximum": 43200000, + "type": "integer", + "title": "max_standby_streaming_delay", + "minimum": 1, + }, + "jit": map[string]any{ + "description": "Controls system-wide use of Just-in-Time Compilation (JIT).", + "type": "boolean", + "title": "jit", + "example": true, + }, + "max_standby_archive_delay": map[string]any{ + "description": "Max standby archive delay in milliseconds", + "maximum": 43200000, + "type": "integer", + "title": "max_standby_archive_delay", + "minimum": 1, + }, + "max_slot_wal_keep_size": map[string]any{ + "description": "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). 
wal_keep_size minimum WAL size setting takes precedence over this.", + "maximum": 2147483647, + "type": "integer", + "title": "max_slot_wal_keep_size", + "minimum": -1, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "postgresql.conf configuration values", + }, + Pglookout: &exoscalesdk.GetDBAASSettingsPGResponseSettingsPglookout{ + Properties: map[string]any{ + "max_failover_replication_time_lag": map[string]any{ + "description": "Number of seconds of master unavailability before triggering database failover to standby", + "default": 60, + "maximum": 9223372036854775807, + "type": "integer", + "title": "max_failover_replication_time_lag", + "minimum": 10, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "PGLookout settings", + }, + Pgbouncer: &exoscalesdk.GetDBAASSettingsPGResponseSettingsPgbouncer{ + Properties: map[string]any{ + "min_pool_size": map[string]any{ + "maximum": 10000, + "type": "integer", + "title": "Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size.", + "minimum": 0, + "example": 0, + }, + "ignore_startup_parameters": map[string]any{ + "type": "array", + "title": "List of parameters to ignore when given in startup packet", + "example": []string{ + "extra_float_digits", + "search_path", + }, + "items": map[string]any{ + "enum": []string{ + "extra_float_digits", + "search_path", + }, + "type": "string", + "title": "Enum of parameters to ignore when given in startup packet", + }, + "maxItems": 32, + }, + "server_lifetime": map[string]any{ + "maximum": 86400, + "type": "integer", + "title": "The pooler will close an unused server connection that has been connected longer than this. [seconds]", + "minimum": 60, + "example": 3600, + }, + "autodb_pool_mode": map[string]any{ + "enum": []string{ + "session", + "transaction", + "statement", + }, + "type": "string", + "title": "PGBouncer pool mode", + "example": "session", + }, + "server_idle_timeout": map[string]any{ + "maximum": 86400, + "type": "integer", + "title": "If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. [seconds]", + "minimum": 0, + "example": 600, + }, + "autodb_max_db_connections": map[string]any{ + "maximum": 2147483647, + "type": "integer", + "title": "Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.", + "minimum": 0, + "example": 0, + }, + "server_reset_query_always": map[string]any{ + "type": "boolean", + "title": "Run server_reset_query (DISCARD ALL) in all pooling modes", + "example": false, + }, + "autodb_pool_size": map[string]any{ + "maximum": 10000, + "type": "integer", + "title": "If non-zero then create automatically a pool of that size per user when a pool doesn't exist.", + "minimum": 0, + "example": 0, + }, + "autodb_idle_timeout": map[string]any{ + "maximum": 86400, + "type": "integer", + "title": "If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. 
[seconds]", + "minimum": 0, + "example": 3600, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "PGBouncer connection pooling settings", + }, + Timescaledb: &exoscalesdk.GetDBAASSettingsPGResponseSettingsTimescaledb{ + Properties: map[string]any{ + "max_background_workers": map[string]any{ + "description": "The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time.", + "maximum": 4096, + "type": "integer", + "title": "timescaledb.max_background_workers", + "minimum": 1, + "example": 8, + }, + }, + AdditionalProperties: ptr.To[bool](false), + Type: "object", + Title: "TimescaleDB extension configuration values", + }, +} diff --git a/operator/postgresqlcontroller/update.go b/operator/postgresqlcontroller/update.go index 385d1634..d02d04a4 100644 --- a/operator/postgresqlcontroller/update.go +++ b/operator/postgresqlcontroller/update.go @@ -2,12 +2,13 @@ package postgresqlcontroller import ( "context" + "encoding/json" "fmt" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" controllerruntime "sigs.k8s.io/controller-runtime" @@ -18,50 +19,53 @@ func (p *pipeline) Update(ctx context.Context, mg resource.Managed) (managed.Ext log := controllerruntime.LoggerFrom(ctx) log.V(1).Info("Updating resource") - pgInstance := fromManaged(mg) + pgInstance := mg.(*exoscalev1.PostgreSQL) spec := pgInstance.Spec.ForProvider body, err := fromSpecToUpdateBody(spec) if err != nil { return managed.ExternalUpdate{}, errors.Wrap(err, "cannot map spec to API request") } - resp, err := p.exo.UpdateDbaasServicePgWithResponse(ctx, oapi.DbaasServiceName(pgInstance.Name), body) + resp, err := p.exo.UpdateDBAASServicePG(ctx, pgInstance.Name, body) if err != nil { return managed.ExternalUpdate{}, errors.Wrap(err, "cannot update instance") } - log.V(1).Info("Response", "json", resp.JSON200) + log.V(1).Info("Response", "message", resp.Message) return managed.ExternalUpdate{}, nil } // fromSpecToUpdateBody places the given spec into the request body. -func fromSpecToUpdateBody(spec exoscalev1.PostgreSQLParameters) (oapi.UpdateDbaasServicePgJSONRequestBody, error) { +func fromSpecToUpdateBody(spec exoscalev1.PostgreSQLParameters) (exoscalesdk.UpdateDBAASServicePGRequest, error) { /** NOTE: If you change anything below, also update fromSpecToUpdateBody(). Unfortunately the generated openapi-types in exoscale are unusable for reusing same properties. 
*/ backupSchedule, err := mapper.ToBackupSchedule(spec.Backup.TimeOfDay) if err != nil { - return oapi.UpdateDbaasServicePgJSONRequestBody{}, fmt.Errorf("invalid backup schedule: %w", err) + return exoscalesdk.UpdateDBAASServicePGRequest{}, fmt.Errorf("invalid backup schedule: %w", err) } - settings, err := mapper.ToMap(spec.PGSettings) - if err != nil { - return oapi.UpdateDbaasServicePgJSONRequestBody{}, fmt.Errorf("invalid pgsettings: %w", err) + settings := exoscalesdk.JSONSchemaPG{} + if len(spec.PGSettings.Raw) != 0 { + err = json.Unmarshal(spec.PGSettings.Raw, &settings) + if err != nil { + return exoscalesdk.UpdateDBAASServicePGRequest{}, fmt.Errorf("invalid pgsettings: %w", err) + } } - return oapi.UpdateDbaasServicePgJSONRequestBody{ - Plan: &spec.Size.Plan, - BackupSchedule: &backupSchedule, - Variant: &variantAiven, + return exoscalesdk.UpdateDBAASServicePGRequest{ + Plan: spec.Size.Plan, + BackupSchedule: &exoscalesdk.UpdateDBAASServicePGRequestBackupSchedule{ + BackupHour: backupSchedule.BackupHour, + BackupMinute: backupSchedule.BackupMinute, + }, + Variant: variantAiven, // Version: pointer.String(spec.Version) -> Version cannot be changed, TerminationProtection: &spec.TerminationProtection, - Maintenance: &struct { - Dow oapi.UpdateDbaasServicePgJSONBodyMaintenanceDow `json:"dow"` - Time string `json:"time"` - }{ - Dow: oapi.UpdateDbaasServicePgJSONBodyMaintenanceDow(spec.Maintenance.DayOfWeek), + Maintenance: &exoscalesdk.UpdateDBAASServicePGRequestMaintenance{ + Dow: exoscalesdk.UpdateDBAASServicePGRequestMaintenanceDow(spec.Maintenance.DayOfWeek), Time: spec.Maintenance.TimeOfDay.String(), }, - IpFilter: mapper.ToSlicePtr(spec.IPFilter), - PgSettings: &settings, + IPFilter: spec.IPFilter, + PGSettings: settings, }, nil } diff --git a/operator/postgresqlcontroller/webhook.go b/operator/postgresqlcontroller/webhook.go index 2008ce08..79d6a306 100644 --- a/operator/postgresqlcontroller/webhook.go +++ b/operator/postgresqlcontroller/webhook.go @@ -3,10 +3,12 @@ package postgresqlcontroller import ( "context" "fmt" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/common" "github.com/vshn/provider-exoscale/operator/pipelineutil" "github.com/vshn/provider-exoscale/operator/webhook" "sigs.k8s.io/controller-runtime/pkg/client" @@ -36,7 +38,7 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm return nil, err } - err = v.validateVersion(ctx, obj, *availableVersions) + err = v.validateVersion(ctx, obj, availableVersions) if err != nil { return nil, err } @@ -44,27 +46,26 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm return nil, v.validateSpec(instance) } -func (v *Validator) getAvailableVersions(ctx context.Context, obj runtime.Object) (*[]string, error) { +func (v *Validator) getAvailableVersions(ctx context.Context, obj runtime.Object) ([]string, error) { instance := obj.(*exoscalev1.PostgreSQL) v.log.V(1).Info("get postgres available versions") - exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, instance.GetProviderConfigName(), exoscalesdk.ClientOptWithAPIEndpoint(fmt.Sprintf("https://api-%s.exoscale.com", instance.Spec.ForProvider.Zone))) + exo, err := pipelineutil.OpenExoscaleClient(ctx, v.kube, instance.GetProviderConfigName(), 
exoscalesdk.ClientOptWithEndpoint(common.ZoneTranslation[instance.Spec.ForProvider.Zone])) if err != nil { return nil, fmt.Errorf("open exoscale client failed: %w", err) } - resp, err := exo.Exoscale.GetDbaasServiceTypeWithResponse(ctx, serviceType) + resp, err := exo.Exoscale.GetDBAASServiceType(ctx, serviceType) if err != nil { return nil, fmt.Errorf("get DBaaS service type failed: %w", err) } - v.log.V(1).Info("DBaaS service type", "body", string(resp.Body)) + v.log.V(1).Info("DBaaS service type", "name", string(resp.Name), "description", string(resp.Description)) - serviceType := *resp.JSON200 - if serviceType.AvailableVersions == nil { + if resp.AvailableVersions == nil { return nil, fmt.Errorf("postgres available versions not found") } - return serviceType.AvailableVersions, nil + return resp.AvailableVersions, nil } func (v *Validator) validateVersion(ctx context.Context, obj runtime.Object, availableVersions []string) error { diff --git a/operator/rediscontroller/connector.go b/operator/rediscontroller/connector.go index e597869f..59b045bf 100644 --- a/operator/rediscontroller/connector.go +++ b/operator/rediscontroller/connector.go @@ -2,15 +2,15 @@ package rediscontroller import ( "context" - "fmt" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" + "github.com/vshn/provider-exoscale/operator/common" "github.com/vshn/provider-exoscale/operator/pipelineutil" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - exoscalesdk "github.com/exoscale/egoscale/v2" + exoscalesdk "github.com/exoscale/egoscale/v3" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -18,7 +18,6 @@ import ( type connector struct { kube client.Client recorder event.Recorder - p *pipeline } // Connect implements managed.ExternalConnecter. 
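Both the PostgreSQL webhook above and the Redis connector in the next hunk stop formatting the API endpoint as https://api-%s.exoscale.com by hand and instead look it up through common.ZoneTranslation, handing the result to the egoscale v3 option ClientOptWithEndpoint. The following is only a rough, self-contained illustration of that lookup-table approach; the zone names, endpoint values and the endpointForZone helper are placeholders, not the real table in operator/common.

package main

import "fmt"

// zoneEndpoints is a stand-in for common.ZoneTranslation: a static map from an
// Exoscale zone name to the API endpoint the v3 client should use.
// All entries here are placeholders for illustration only.
var zoneEndpoints = map[string]string{
	"example-zone-1": "https://api.example-zone-1.invalid",
	"example-zone-2": "https://api.example-zone-2.invalid",
}

// endpointForZone resolves a zone and reports whether it is known, so a caller
// can fail early instead of building a client with an empty endpoint.
func endpointForZone(zone string) (string, bool) {
	endpoint, ok := zoneEndpoints[zone]
	return endpoint, ok
}

func main() {
	if endpoint, ok := endpointForZone("example-zone-1"); ok {
		fmt.Println("using endpoint:", endpoint)
	}
	if _, ok := endpointForZone("unknown-zone"); !ok {
		fmt.Println("unknown zone: refusing to build a client")
	}
}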
@@ -26,13 +25,9 @@ func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.E log := ctrl.LoggerFrom(ctx) log.V(1).Info("connecting resource") - if c.p != nil { - return c.p, nil - } - redisInstance := mg.(*exoscalev1.Redis) - exo, err := pipelineutil.OpenExoscaleClient(ctx, c.kube, redisInstance.GetProviderConfigName(), exoscalesdk.ClientOptWithAPIEndpoint(fmt.Sprintf("https://api-%s.exoscale.com", redisInstance.Spec.ForProvider.Zone))) + exo, err := pipelineutil.OpenExoscaleClient(ctx, c.kube, redisInstance.GetProviderConfigName(), exoscalesdk.ClientOptWithEndpoint(common.ZoneTranslation[redisInstance.Spec.ForProvider.Zone])) if err != nil { return nil, err } diff --git a/operator/rediscontroller/create.go b/operator/rediscontroller/create.go index c196e5a4..9257ae76 100644 --- a/operator/rediscontroller/create.go +++ b/operator/rediscontroller/create.go @@ -2,14 +2,14 @@ package rediscontroller import ( "context" + "encoding/json" "fmt" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - "github.com/vshn/provider-exoscale/operator/mapper" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/exoscale/egoscale/v2/oapi" controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -21,31 +21,29 @@ func (p pipeline) Create(ctx context.Context, mg resource.Managed) (managed.Exte spec := redisInstance.Spec.ForProvider ipFilter := []string(spec.IPFilter) - settings, err := mapper.ToMap(spec.RedisSettings) - if err != nil { - return managed.ExternalCreation{}, fmt.Errorf("invalid redis settings: %w", err) + settings := exoscalesdk.JSONSchemaRedis{} + if len(spec.RedisSettings.Raw) != 0 { + err := json.Unmarshal(spec.RedisSettings.Raw, &settings) + if err != nil { + return managed.ExternalCreation{}, fmt.Errorf("cannot map redisInstance settings: %w", err) + } } - body := oapi.CreateDbaasServiceRedisJSONRequestBody{ - IpFilter: &ipFilter, - Maintenance: &struct { - Dow oapi.CreateDbaasServiceRedisJSONBodyMaintenanceDow `json:"dow"` - - // Time for installing updates, UTC - Time string `json:"time"` - }{ - Dow: oapi.CreateDbaasServiceRedisJSONBodyMaintenanceDow(spec.Maintenance.DayOfWeek), + body := exoscalesdk.CreateDBAASServiceRedisRequest{ + IPFilter: ipFilter, + Maintenance: &exoscalesdk.CreateDBAASServiceRedisRequestMaintenance{ + Dow: exoscalesdk.CreateDBAASServiceRedisRequestMaintenanceDow(spec.Maintenance.DayOfWeek), Time: spec.Maintenance.TimeOfDay.String(), }, Plan: spec.Size.Plan, RedisSettings: &settings, TerminationProtection: &spec.TerminationProtection, } - resp, err := p.exo.CreateDbaasServiceRedisWithResponse(ctx, oapi.DbaasServiceName(redisInstance.GetInstanceName()), body) + resp, err := p.exo.CreateDBAASServiceRedis(ctx, redisInstance.GetInstanceName(), body) if err != nil { return managed.ExternalCreation{}, fmt.Errorf("unable to create instance: %w", err) } - log.V(1).Info("response", "body", string(resp.Body)) + log.V(1).Info("response", "message", string(resp.Message)) return managed.ExternalCreation{}, nil } diff --git a/operator/rediscontroller/delete.go b/operator/rediscontroller/delete.go index 3c6d14e0..aae0ee9d 100644 --- a/operator/rediscontroller/delete.go +++ b/operator/rediscontroller/delete.go @@ -15,10 +15,10 @@ func (p pipeline) Delete(ctx context.Context, mg resource.Managed) error { log.Info("deleting resource") redisInstance := mg.(*exoscalev1.Redis) - resp, err := 
p.exo.DeleteDbaasServiceWithResponse(ctx, redisInstance.GetInstanceName()) + resp, err := p.exo.DeleteDBAASServiceRedis(ctx, redisInstance.GetInstanceName()) if err != nil { return fmt.Errorf("cannot delete instance: %w", err) } - log.V(1).Info("response", "body", string(resp.Body)) + log.V(1).Info("response", "message", string(resp.Message)) return nil } diff --git a/operator/rediscontroller/observe.go b/operator/rediscontroller/observe.go index 6b434aba..1eb8e736 100644 --- a/operator/rediscontroller/observe.go +++ b/operator/rediscontroller/observe.go @@ -2,18 +2,20 @@ package rediscontroller import ( "context" + "encoding/json" "errors" "fmt" - "k8s.io/utils/ptr" "net/url" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - exoscaleapi "github.com/exoscale/egoscale/v2/api" - "github.com/exoscale/egoscale/v2/oapi" "github.com/go-logr/logr" controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -24,16 +26,14 @@ func (p pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ext redisInstance := mg.(*exoscalev1.Redis) - resp, err := p.exo.GetDbaasServiceRedisWithResponse(ctx, oapi.DbaasServiceName(redisInstance.GetInstanceName())) + redis, err := p.exo.GetDBAASServiceRedis(ctx, redisInstance.GetInstanceName()) if err != nil { - if errors.Is(err, exoscaleapi.ErrNotFound) { + if errors.Is(err, exoscalesdk.ErrNotFound) { return managed.ExternalObservation{}, nil } return managed.ExternalObservation{}, fmt.Errorf("unable to observe instance: %w", err) } - redis := *resp.JSON200 - log.V(2).Info("response", "raw", string(resp.Body)) log.V(1).Info("retrieved instance", "state", redis.State) redisInstance.Status.AtProvider, err = mapObservation(redis) @@ -41,18 +41,18 @@ func (p pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ext log.Error(err, "unable to fully map observation, ignoring.") } - var state oapi.EnumServiceState - if redis.State != nil { - state = *redis.State + var state exoscalesdk.EnumServiceState + if redis.State != "" { + state = redis.State } switch state { - case oapi.EnumServiceStateRunning: + case exoscalesdk.EnumServiceStateRunning: redisInstance.SetConditions(exoscalev1.Running()) - case oapi.EnumServiceStateRebuilding: + case exoscalesdk.EnumServiceStateRebuilding: redisInstance.SetConditions(exoscalev1.Rebuilding()) - case oapi.EnumServiceStatePoweroff: + case exoscalesdk.EnumServiceStatePoweroff: redisInstance.SetConditions(exoscalev1.PoweredOff()) - case oapi.EnumServiceStateRebalancing: + case exoscalesdk.EnumServiceStateRebalancing: redisInstance.SetConditions(exoscalev1.Rebalancing()) default: log.V(2).Info("ignoring unknown instance state", "state", state) @@ -62,12 +62,12 @@ func (p pipeline) Observe(ctx context.Context, mg resource.Managed) (managed.Ext if err != nil { return managed.ExternalObservation{}, err } - cd, err := connectionDetails(redis) + cd, err := connectionDetails(ctx, redis, p.exo) if err != nil { return managed.ExternalObservation{}, fmt.Errorf("unable to parse connection details: %w", err) } - currentParams, err := setSettingsDefaults(ctx, p.exo, &redisInstance.Spec.ForProvider) + currentParams, err := setSettingsDefaults(ctx, *p.exo, &redisInstance.Spec.ForProvider) if err != nil { log.Error(err, "unable to set redis 
settings schema") currentParams = &redisInstance.Spec.ForProvider @@ -105,36 +105,45 @@ func isUpToDate(current, external *exoscalev1.RedisParameters, log logr.Logger) return ok } -func connectionDetails(in oapi.DbaasServiceRedis) (map[string][]byte, error) { - if in.Uri == nil { - return map[string][]byte{}, nil +func connectionDetails(ctx context.Context, in *exoscalesdk.DBAASServiceRedis, client *exoscalesdk.Client) (managed.ConnectionDetails, error) { + uri := in.URI + // uri may be absent + if uri == "" { + if in.ConnectionInfo == nil || in.ConnectionInfo.URI == nil || len(in.ConnectionInfo.URI) == 0 { + return map[string][]byte{}, nil + } + uri = in.ConnectionInfo.URI[0] } - - uri := *in.Uri parsed, err := url.Parse(uri) if err != nil { - return nil, err + return nil, fmt.Errorf("cannot parse connection URI: %w", err) + } + password, err := client.RevealDBAASRedisUserPassword(ctx, string(in.Name), parsed.User.Username()) + if err != nil { + return nil, fmt.Errorf("cannot reveal password for Redis instance: %w", err) } - password, _ := parsed.User.Password() return map[string][]byte{ "REDIS_HOST": []byte(parsed.Hostname()), "REDIS_PORT": []byte(parsed.Port()), "REDIS_USERNAME": []byte(parsed.User.Username()), - "REDIS_PASSWORD": []byte(password), + "REDIS_PASSWORD": []byte(password.Password), "REDIS_URL": []byte(uri), }, nil } -func mapObservation(instance oapi.DbaasServiceRedis) (exoscalev1.RedisObservation, error) { +func mapObservation(instance *exoscalesdk.DBAASServiceRedis) (exoscalev1.RedisObservation, error) { + jsonSettings, err := json.Marshal(instance.RedisSettings) + if err != nil { + return exoscalev1.RedisObservation{}, fmt.Errorf("error parsing RedisSettings") + } + + settings := runtime.RawExtension{Raw: jsonSettings} + observation := exoscalev1.RedisObservation{ - Version: ptr.Deref(instance.Version, ""), + Version: instance.Version, NodeStates: mapper.ToNodeStates(instance.NodeStates), } - settings, err := mapper.ToRawExtension(instance.RedisSettings) - if err != nil { - return observation, fmt.Errorf("settings: %w", err) - } observation.RedisSettings = settings notifications, err := mapper.ToNotifications(instance.Notifications) @@ -146,11 +155,14 @@ func mapObservation(instance oapi.DbaasServiceRedis) (exoscalev1.RedisObservatio return observation, nil } -func mapParameters(in oapi.DbaasServiceRedis, zone exoscalev1.Zone) (*exoscalev1.RedisParameters, error) { - settings, err := mapper.ToRawExtension(in.RedisSettings) +func mapParameters(in *exoscalesdk.DBAASServiceRedis, zone exoscalev1.Zone) (*exoscalev1.RedisParameters, error) { + jsonSettings, err := json.Marshal(in.RedisSettings) if err != nil { - return nil, fmt.Errorf("unable to parse settings: %w", err) + return nil, fmt.Errorf("cannot parse redisInstance settings: %w", err) } + + settings := runtime.RawExtension{Raw: jsonSettings} + return &exoscalev1.RedisParameters{ Maintenance: exoscalev1.MaintenanceSpec{ DayOfWeek: in.Maintenance.Dow, @@ -162,7 +174,7 @@ func mapParameters(in oapi.DbaasServiceRedis, zone exoscalev1.Zone) (*exoscalev1 Size: exoscalev1.SizeSpec{ Plan: in.Plan, }, - IPFilter: *in.IpFilter, + IPFilter: in.IPFilter, }, RedisSettings: settings, }, nil diff --git a/operator/rediscontroller/pipeline.go b/operator/rediscontroller/pipeline.go index 8337f3ff..d8207eb2 100644 --- a/operator/rediscontroller/pipeline.go +++ b/operator/rediscontroller/pipeline.go @@ -2,7 +2,7 @@ package rediscontroller import ( "github.com/crossplane/crossplane-runtime/pkg/event" - 
"github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -10,11 +10,11 @@ import ( type pipeline struct { kube client.Client recorder event.Recorder - exo oapi.ClientWithResponsesInterface + exo *exoscalesdk.Client } // newPipeline returns a new instance of pipeline. -func newPipeline(client client.Client, recorder event.Recorder, exoscaleClient oapi.ClientWithResponsesInterface) *pipeline { +func newPipeline(client client.Client, recorder event.Recorder, exoscaleClient *exoscalesdk.Client) *pipeline { return &pipeline{ kube: client, recorder: recorder, diff --git a/operator/rediscontroller/settings.go b/operator/rediscontroller/settings.go index d664fc68..078841b1 100644 --- a/operator/rediscontroller/settings.go +++ b/operator/rediscontroller/settings.go @@ -2,15 +2,16 @@ package rediscontroller import ( "context" + "encoding/json" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - "github.com/exoscale/egoscale/v2/oapi" "github.com/vshn/provider-exoscale/internal/settings" ) type settingsFetcher interface { - GetDbaasSettingsRedisWithResponse(ctx context.Context, reqEditors ...oapi.RequestEditorFn) (*oapi.GetDbaasSettingsRedisResponse, error) + GetDBAASSettingsRedis(ctx context.Context) (*exoscalesdk.GetDBAASSettingsRedisResponse, error) } func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1.RedisParameters) (*exoscalev1.RedisParameters, error) { @@ -29,11 +30,15 @@ func setSettingsDefaults(ctx context.Context, f settingsFetcher, in *exoscalev1. } func fetchSettingSchema(ctx context.Context, f settingsFetcher) (settings.Schemas, error) { - resp, err := f.GetDbaasSettingsRedisWithResponse(ctx) + resp, err := f.GetDBAASSettingsRedis(ctx) if err != nil { return nil, err } - schemas, err := settings.ParseSchemas(resp.Body) + settingsJson, err := json.Marshal(resp) + if err != nil { + return nil, err + } + schemas, err := settings.ParseSchemas(settingsJson) if err != nil { return nil, err } diff --git a/operator/rediscontroller/settings_test.go b/operator/rediscontroller/settings_test.go index 97b0f619..b8755fba 100644 --- a/operator/rediscontroller/settings_test.go +++ b/operator/rediscontroller/settings_test.go @@ -4,15 +4,13 @@ import ( "context" "testing" - "github.com/exoscale/egoscale/v2/oapi" + exoscalesdk "github.com/exoscale/egoscale/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/vshn/provider-exoscale/operator/mapper" ) -var rawResponse = []byte(`{"settings":{"redis":{"type":"object","title":"Redis settings","properties":{"ssl":{"default":true,"type":"boolean","title":"Require SSL to access Redis"},"lfu_log_factor":{"default":10,"maximum":100,"type":"integer","title":"Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies","minimum":0},"maxmemory_policy":{"enum":["noeviction","allkeys-lru","volatile-lru","allkeys-random","volatile-random","volatile-ttl","volatile-lfu","allkeys-lfu"],"default":"noeviction","type":["string","null"],"title":"Redis maxmemory-policy"},"io_threads":{"maximum":32,"type":"integer","title":"Redis IO thread count","minimum":1,"example":1},"lfu_decay_time":{"default":1,"maximum":120,"type":"integer","title":"LFU maxmemory-policy counter decay time in minutes","minimum":1},"pubsub_client_output_buffer_limit":{"description":"Set output buffer limit for pub / sub 
clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.","maximum":512,"type":"integer","title":"Pub/sub client output buffer hard limit in MB","minimum":32,"example":64},"notify_keyspace_events":{"default":"","type":"string","title":"Set notify-keyspace-events option","maxLength":32,"pattern":"^[KEg\\$lshzxeA]*$"},"persistence":{"description":"When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.","enum":["off","rdb"],"type":"string","title":"Redis persistence"},"timeout":{"default":300,"maximum":31536000,"type":"integer","title":"Redis idle connection timeout in seconds","minimum":0},"acl_channels_default":{"description":"Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.","enum":["allchannels","resetchannels"],"type":"string","title":"Default ACL for pub/sub channels used when Redis user is created"},"number_of_databases":{"description":"Set number of redis databases. Changing this will cause a restart of redis service.","maximum":128,"type":"integer","title":"Number of redis databases","minimum":1,"example":16}}}}}`) - //nolint:golint,unused var emptyRedisSettings = map[string]interface{}{ "lfu_decay_time": 1, @@ -25,9 +23,9 @@ var emptyRedisSettings = map[string]interface{}{ type fakeSettingsFetcher struct{} -func (fakeSettingsFetcher) GetDbaasSettingsRedisWithResponse(ctx context.Context, reqEditors ...oapi.RequestEditorFn) (*oapi.GetDbaasSettingsRedisResponse, error) { - return &oapi.GetDbaasSettingsRedisResponse{ - Body: rawResponse, +func (fakeSettingsFetcher) GetDBAASSettingsRedis(ctx context.Context) (*exoscalesdk.GetDBAASSettingsRedisResponse, error) { + return &exoscalesdk.GetDBAASSettingsRedisResponse{ + Settings: &redisSettings, }, nil } @@ -62,3 +60,104 @@ func TestDefaultSettings(t *testing.T) { assert.EqualValues(t, 300, setingsWithDefaults["timeout"]) assert.EqualValues(t, "noeviction", setingsWithDefaults["maxmemory_policy"]) } + +var redisSettings = exoscalesdk.GetDBAASSettingsRedisResponseSettings{ + Redis: &exoscalesdk.GetDBAASSettingsRedisResponseSettingsRedis{ + Properties: map[string]any{ + "ssl": map[string]any{ + "default": true, + "type": "boolean", + "title": "Require SSL to access Redis", + }, + "lfu_log_factor": map[string]any{ + "default": 10, + "maximum": 100, + "type": "integer", + "title": "Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies", + "minimum": 0, + }, + "maxmemory_policy": map[string]any{ + "enum": []string{ + "noeviction", + "allkeys-lru", + "volatile-lru", + "allkeys-random", + "volatile-random", + "volatile-ttl", + "volatile-lfu", + "allkeys-lfu", + }, + "default": "noeviction", + "type": []string{ + "string", + "null", + }, + "title": "Redis maxmemory-policy", + }, + "io_threads": map[string]any{ + "maximum": 32, + "type": "integer", + "title": "Redis IO thread count", + "minimum": 1, + "example": 1, + }, + "lfu_decay_time": map[string]any{ + "default": 1, + "maximum": 120, + "type": "integer", + "title": "LFU 
maxmemory-policy counter decay time in minutes", + "minimum": 1, + }, + "pubsub_client_output_buffer_limit": map[string]any{ + "description": "Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.", + "maximum": 512, + "type": "integer", + "title": "Pub/sub client output buffer hard limit in MB", + "minimum": 32, + "example": 64, + }, + "notify_keyspace_events": map[string]any{ + "default": "", + "type": "string", + "title": "Set notify-keyspace-events option", + "maxLength": 32, + "pattern": "^[KEg\\$lshzxeA]*$", + }, + "persistence": map[string]any{ + "description": "When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.", + "enum": []string{ + "off", + "rdb", + }, + "type": "string", + "title": "Redis persistence", + }, + "timeout": map[string]any{ + "default": 300, + "maximum": 31536000, + "type": "integer", + "title": "Redis idle connection timeout in seconds", + "minimum": 0, + }, + "acl_channels_default": map[string]any{ + "description": "Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.", + "enum": []string{ + "allchannels", + "resetchannels", + }, + "type": "string", + "title": "Default ACL for pub/sub channels used when Redis user is created", + }, + "number_of_databases": map[string]any{ + "description": "Set number of redis databases. 
Changing this will cause a restart of redis service.", + "maximum": 128, + "type": "integer", + "title": "Number of redis databases", + "minimum": 1, + "example": 16, + }, + }, + Type: "object", + Title: "Redis settings", + }, +} diff --git a/operator/rediscontroller/update.go b/operator/rediscontroller/update.go index 28b54f52..b7be8f76 100644 --- a/operator/rediscontroller/update.go +++ b/operator/rediscontroller/update.go @@ -2,14 +2,14 @@ package rediscontroller import ( "context" + "encoding/json" "fmt" + exoscalesdk "github.com/exoscale/egoscale/v3" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/exoscale/egoscale/v2/oapi" - "github.com/vshn/provider-exoscale/operator/mapper" controllerruntime "sigs.k8s.io/controller-runtime" ) @@ -21,30 +21,28 @@ func (p pipeline) Update(ctx context.Context, mg resource.Managed) (managed.Exte spec := redisInstance.Spec.ForProvider ipFilter := []string(spec.IPFilter) - settings, err := mapper.ToMap(spec.RedisSettings) - if err != nil { - return managed.ExternalUpdate{}, fmt.Errorf("invalid redis settings: %w", err) + settings := exoscalesdk.JSONSchemaRedis{} + if len(spec.RedisSettings.Raw) != 0 { + err := json.Unmarshal(spec.RedisSettings.Raw, &settings) + if err != nil { + return managed.ExternalUpdate{}, fmt.Errorf("cannot map redisInstance settings: %w", err) + } } - body := oapi.UpdateDbaasServiceRedisJSONRequestBody{ - IpFilter: &ipFilter, - Maintenance: &struct { - Dow oapi.UpdateDbaasServiceRedisJSONBodyMaintenanceDow `json:"dow"` - - // Time for installing updates, UTC - Time string `json:"time"` - }{ - Dow: oapi.UpdateDbaasServiceRedisJSONBodyMaintenanceDow(spec.Maintenance.DayOfWeek), + body := exoscalesdk.UpdateDBAASServiceRedisRequest{ + IPFilter: ipFilter, + Maintenance: &exoscalesdk.UpdateDBAASServiceRedisRequestMaintenance{ + Dow: exoscalesdk.UpdateDBAASServiceRedisRequestMaintenanceDow(spec.Maintenance.DayOfWeek), Time: spec.Maintenance.TimeOfDay.String(), }, - Plan: &spec.Size.Plan, + Plan: spec.Size.Plan, RedisSettings: &settings, TerminationProtection: &spec.TerminationProtection, } - resp, err := p.exo.UpdateDbaasServiceRedisWithResponse(ctx, oapi.DbaasServiceName(redisInstance.GetInstanceName()), body) + resp, err := p.exo.UpdateDBAASServiceRedis(ctx, redisInstance.GetInstanceName(), body) if err != nil { return managed.ExternalUpdate{}, fmt.Errorf("unable to create instance: %w", err) } - log.V(1).Info("response", "body", string(resp.Body)) + log.V(1).Info("response", "message", string(resp.Message)) return managed.ExternalUpdate{}, nil }
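Taken together, the Kafka, PostgreSQL and Redis pipelines in this diff apply the same two settings conversions after the egoscale v3 migration: on the way in, the runtime.RawExtension from the spec is unmarshalled into the SDK's typed JSONSchema struct only when it is non-empty, so an omitted settings block no longer aborts Create or Update; on the way out, the settings reported by the API are marshalled back into a RawExtension for the observation. The sketch below shows both directions with a stand-in settings struct; fakeRedisSettings and its two fields are illustrative only, not the real exoscalesdk.JSONSchemaRedis.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// fakeRedisSettings stands in for exoscalesdk.JSONSchemaRedis; the real type in
// github.com/exoscale/egoscale/v3 has many more fields.
type fakeRedisSettings struct {
	MaxmemoryPolicy string `json:"maxmemory_policy,omitempty"`
	IOThreads       int    `json:"io_threads,omitempty"`
}

// settingsFromSpec mirrors the guarded unmarshal used in Create/Update above:
// an empty RawExtension yields a zero-value struct instead of an error.
func settingsFromSpec(raw runtime.RawExtension) (fakeRedisSettings, error) {
	settings := fakeRedisSettings{}
	if len(raw.Raw) == 0 {
		return settings, nil
	}
	if err := json.Unmarshal(raw.Raw, &settings); err != nil {
		return settings, fmt.Errorf("cannot map settings: %w", err)
	}
	return settings, nil
}

// settingsToObservation mirrors mapObservation/mapParameters: the settings
// reported by the API are marshalled back into a RawExtension for the status.
func settingsToObservation(s fakeRedisSettings) (runtime.RawExtension, error) {
	raw, err := json.Marshal(s)
	if err != nil {
		return runtime.RawExtension{}, fmt.Errorf("cannot marshal settings: %w", err)
	}
	return runtime.RawExtension{Raw: raw}, nil
}

func main() {
	empty, _ := settingsFromSpec(runtime.RawExtension{})
	fmt.Printf("empty spec        -> %+v\n", empty)

	fromSpec, _ := settingsFromSpec(runtime.RawExtension{Raw: []byte(`{"maxmemory_policy":"noeviction","io_threads":2}`)})
	fmt.Printf("populated spec    -> %+v\n", fromSpec)

	obs, _ := settingsToObservation(fromSpec)
	fmt.Printf("back to extension -> %s\n", obs.Raw)
}

Guarding on len(raw.Raw) rather than on a nil pointer matches the checks added in the create.go and update.go hunks above.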