Commit c7bee2e

Merge pull request #610 from ChrisdeR/feature/tke_global_config
add tke node_pool_global_config
2 parents e403967 + 6e3b075

4 files changed: +188, -9 lines

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
```diff
@@ -1,7 +1,13 @@
 ## 1.55.2 (Unreleased)
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_kubernetes_cluster` add `node_pool_global_config` to support node pool global config setting.
+
 ## 1.55.1 (March 26, 2021)
 
 ENHANCEMENTS:
+
 * Resource: `tencentcloud_tcr_vpc_attachment` add more time for retry.
 
 ## 1.55.0 (March 26, 2021)
```

tencentcloud/resource_tc_kubernetes_cluster.go

Lines changed: 168 additions & 0 deletions
```diff
@@ -207,6 +207,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
 	cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
+	tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
 	"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
 )
 
@@ -479,6 +480,65 @@ func TkeCvmCreateInfo() map[string]*schema.Schema {
 	}
 }
 
+func TkeNodePoolGlobalConfig() map[string]*schema.Schema {
+	return map[string]*schema.Schema{
+		"is_scale_in_enabled": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "Indicates whether to enable scale-in.",
+		},
+		"expander": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Computed:    true,
+			Description: "Indicates which scale-out method will be used when there are multiple scaling groups. Valid values: `random` - select a random scaling group, `most-pods` - select the scaling group that can schedule the most pods, `least-waste` - select the scaling group that can ensure the fewest remaining resources after Pod scheduling.",
+		},
+		"max_concurrent_scale_in": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Max concurrent scale-in volume.",
+		},
+		"scale_in_delay": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Number of minutes after cluster scale-out when the system starts judging whether to perform scale-in.",
+		},
+		"scale_in_unneeded_time": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Number of consecutive minutes of idleness after which the node is subject to scale-in.",
+		},
+		"scale_in_utilization_threshold": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Computed:    true,
+			Description: "Percentage of node resource usage below which the node is considered to be idle.",
+		},
+		"ignore_daemon_sets_utilization": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "Whether to ignore DaemonSet pods by default when calculating resource usage.",
+		},
+		"skip_nodes_with_local_storage": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "During scale-in, ignore nodes with local storage pods.",
+		},
+		"skip_nodes_with_system_pods": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Computed:    true,
+			Description: "During scale-in, ignore nodes with pods in the kube-system namespace that are not managed by DaemonSet.",
+		},
+	}
+}
+
 func resourceTencentCloudTkeCluster() *schema.Resource {
 	schemaBody := map[string]*schema.Schema{
 		"cluster_name": {
@@ -544,6 +604,15 @@ func resourceTencentCloudTkeCluster() *schema.Resource {
 			Default:     false,
 			Description: "Indicates whether to enable cluster node auto scaler.",
 		},
+		"node_pool_global_config": {
+			Type:     schema.TypeList,
+			Optional: true,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: TkeNodePoolGlobalConfig(),
+			},
+			Description: "Global config effective for all node pools.",
+		},
 		"cluster_extra_args": {
 			Type:     schema.TypeList,
 			ForceNew: true,
@@ -1115,6 +1184,43 @@ func tkeGetCvmRunInstancesPara(dMap map[string]interface{}, meta interface{},
 	return
 }
 
+func tkeGetNodePoolGlobalConfig(d *schema.ResourceData) *tke.ModifyClusterAsGroupOptionAttributeRequest {
+	request := tke.NewModifyClusterAsGroupOptionAttributeRequest()
+	request.ClusterId = helper.String(d.Id())
+
+	clusterAsGroupOption := &tke.ClusterAsGroupOption{}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.is_scale_in_enabled"); ok {
+		clusterAsGroupOption.IsScaleDownEnabled = helper.Bool(v.(bool))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.expander"); ok {
+		clusterAsGroupOption.Expander = helper.String(v.(string))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.max_concurrent_scale_in"); ok {
+		clusterAsGroupOption.MaxEmptyBulkDelete = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_delay"); ok {
+		clusterAsGroupOption.ScaleDownDelay = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_unneeded_time"); ok {
+		clusterAsGroupOption.ScaleDownUnneededTime = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_utilization_threshold"); ok {
+		clusterAsGroupOption.ScaleDownUtilizationThreshold = helper.IntInt64(v.(int))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.ignore_daemon_sets_utilization"); ok {
+		clusterAsGroupOption.IgnoreDaemonSetsUtilization = helper.Bool(v.(bool))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.skip_nodes_with_local_storage"); ok {
+		clusterAsGroupOption.SkipNodesWithLocalStorage = helper.Bool(v.(bool))
+	}
+	if v, ok := d.GetOkExists("node_pool_global_config.0.skip_nodes_with_system_pods"); ok {
+		clusterAsGroupOption.SkipNodesWithSystemPods = helper.Bool(v.(bool))
+	}
+
+	request.ClusterAsGroupOption = clusterAsGroupOption
+	return request
+}
+
 func resourceTencentCloudTkeClusterCreate(d *schema.ResourceData, meta interface{}) error {
 	defer logElapsed("resource.tencentcloud_kubernetes_cluster.create")()
 
@@ -1453,6 +1559,21 @@ func resourceTencentCloudTkeClusterCreate(d *schema.ResourceData, meta interface
 		}
 	}
 
+	// Modify node pool global config
+	if _, ok := d.GetOk("node_pool_global_config"); ok {
+		request := tkeGetNodePoolGlobalConfig(d)
+		err = resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+			inErr := service.ModifyClusterNodePoolGlobalConfig(ctx, request)
+			if inErr != nil {
+				return retryError(inErr)
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+
 	if err = resourceTencentCloudTkeClusterRead(d, meta); err != nil {
 		log.Printf("[WARN]%s resource.kubernetes_cluster.read after create fail , %s", logId, err.Error())
 		return err
@@ -1605,6 +1726,37 @@ func resourceTencentCloudTkeClusterRead(d *schema.ResourceData, meta interface{}
 		_ = d.Set("cluster_intranet", true)
 	}
 
+	var globalConfig *tke.ClusterAsGroupOption
+	err = resource.Retry(readRetryTimeout, func() *resource.RetryError {
+		globalConfig, err = service.DescribeClusterNodePoolGlobalConfig(ctx, d.Id())
+		if e, ok := err.(*errors.TencentCloudSDKError); ok {
+			if e.GetCode() == "InternalError.ClusterNotFound" {
+				return nil
+			}
+		}
+		if err != nil {
+			return resource.RetryableError(err)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if globalConfig != nil {
+		temp := make(map[string]interface{})
+		temp["is_scale_in_enabled"] = globalConfig.IsScaleDownEnabled
+		temp["expander"] = globalConfig.Expander
+		temp["max_concurrent_scale_in"] = globalConfig.MaxEmptyBulkDelete
+		temp["scale_in_delay"] = globalConfig.ScaleDownDelay
+		temp["scale_in_unneeded_time"] = globalConfig.ScaleDownUnneededTime
+		temp["scale_in_utilization_threshold"] = globalConfig.ScaleDownUtilizationThreshold
+		temp["ignore_daemon_sets_utilization"] = globalConfig.IgnoreDaemonSetsUtilization
+		temp["skip_nodes_with_local_storage"] = globalConfig.SkipNodesWithLocalStorage
+		temp["skip_nodes_with_system_pods"] = globalConfig.SkipNodesWithSystemPods
+
+		_ = d.Set("node_pool_global_config", []map[string]interface{}{temp})
+	}
 	return nil
 }
 
@@ -1925,6 +2077,22 @@ func resourceTencentCloudTkeClusterUpdate(d *schema.ResourceData, meta interface
 		}
 	}
 
+	// update node pool global config
+	if d.HasChange("node_pool_global_config") {
+		request := tkeGetNodePoolGlobalConfig(d)
+		err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+			inErr := tkeService.ModifyClusterNodePoolGlobalConfig(ctx, request)
+			if inErr != nil {
+				return retryError(inErr)
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+		d.SetPartial("node_pool_global_config")
+	}
+
 	d.Partial(false)
 	if err := resourceTencentCloudTkeClusterRead(d, meta); err != nil {
 		log.Printf("[WARN]%s resource.kubernetes_cluster.read after update fail , %s", logId, err.Error())
```

tencentcloud/service_tencentcloud_tke.go

Lines changed: 1 addition & 9 deletions
```diff
@@ -1115,21 +1115,13 @@ func (me *TkeService) DescribeNodePool(ctx context.Context, clusterId string, no
 }
 
 //node pool global config
-func (me *TkeService) ModifyClusterNodePoolGlobalConfig(ctx context.Context, clusterId string, isScaleDown bool, expanderStrategy string) (errRet error) {
-
+func (me *TkeService) ModifyClusterNodePoolGlobalConfig(ctx context.Context, request *tke.ModifyClusterAsGroupOptionAttributeRequest) (errRet error) {
 	logId := getLogId(ctx)
-	request := tke.NewModifyClusterAsGroupOptionAttributeRequest()
-
 	defer func() {
 		if errRet != nil {
 			log.Printf("[CRITAL]%s api[%s] fail, reason[%s]\n", logId, request.GetAction(), errRet.Error())
 		}
 	}()
-	request.ClusterId = &clusterId
-	request.ClusterAsGroupOption = &tke.ClusterAsGroupOption{
-		IsScaleDownEnabled: &isScaleDown,
-		Expander:           &expanderStrategy,
-	}
 
 	ratelimit.Check(request.GetAction())
 	_, err := me.client.UseTkeClient().ModifyClusterAsGroupOptionAttribute(request)
```

website/docs/r/kubernetes_cluster.html.markdown

Lines changed: 13 additions & 0 deletions
```diff
@@ -237,6 +237,7 @@ The following arguments are supported:
 * `mount_target` - (Optional, ForceNew) Mount target. Default is not mounting.
 * `network_type` - (Optional, ForceNew) Cluster network type, GR or VPC-CNI. Default is GR.
 * `node_name_type` - (Optional, ForceNew) Node name type of Cluster, the available values include: 'lan-ip' and 'hostname', Default is 'lan-ip'.
+* `node_pool_global_config` - (Optional) Global config effective for all node pools.
 * `project_id` - (Optional) Project ID, default value is 0.
 * `service_cidr` - (Optional, ForceNew) A network address block of the service. Different from vpc cidr and cidr of other clusters within this vpc. Must be in 10./192.168/172.[16-31] segments.
 * `tags` - (Optional) The tags of the cluster.
@@ -281,6 +282,18 @@ The `master_config` object supports the following:
 * `system_disk_type` - (Optional, ForceNew) System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_BASIC`: HDD cloud disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `LOCAL_BASIC` and `LOCAL_SSD` are deprecated.
 * `user_data` - (Optional, ForceNew) Base64-encoded User Data text, the length limit is 16KB.
 
+The `node_pool_global_config` object supports the following:
+
+* `expander` - (Optional) Indicates which scale-out method will be used when there are multiple scaling groups. Valid values: `random` - select a random scaling group, `most-pods` - select the scaling group that can schedule the most pods, `least-waste` - select the scaling group that can ensure the fewest remaining resources after Pod scheduling.
+* `ignore_daemon_sets_utilization` - (Optional) Whether to ignore DaemonSet pods by default when calculating resource usage.
+* `is_scale_in_enabled` - (Optional) Indicates whether to enable scale-in.
+* `max_concurrent_scale_in` - (Optional) Max concurrent scale-in volume.
+* `scale_in_delay` - (Optional) Number of minutes after cluster scale-out when the system starts judging whether to perform scale-in.
+* `scale_in_unneeded_time` - (Optional) Number of consecutive minutes of idleness after which the node is subject to scale-in.
+* `scale_in_utilization_threshold` - (Optional) Percentage of node resource usage below which the node is considered to be idle.
+* `skip_nodes_with_local_storage` - (Optional) During scale-in, ignore nodes with local storage pods.
+* `skip_nodes_with_system_pods` - (Optional) During scale-in, ignore nodes with pods in the kube-system namespace that are not managed by DaemonSet.
+
 The `worker_config` object supports the following:
 
 * `instance_type` - (Required, ForceNew) Specified types of CVM instance.
```
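For context, a minimal usage sketch of the new block follows. It is not part of this commit's diff: the resource label `example` and all attribute values are illustrative assumptions, and the required cluster arguments are elided.

```hcl
resource "tencentcloud_kubernetes_cluster" "example" {
  # required cluster arguments (vpc_id, cluster_cidr, worker_config, ...) elided

  # every attribute in this block is optional; values shown are placeholders
  node_pool_global_config {
    is_scale_in_enabled            = true
    expander                       = "random"
    max_concurrent_scale_in        = 5
    scale_in_delay                 = 10
    scale_in_unneeded_time         = 10
    scale_in_utilization_threshold = 50
    ignore_daemon_sets_utilization = true
    skip_nodes_with_local_storage  = false
    skip_nodes_with_system_pods    = true
  }
}
```

Because the schema marks each field `Optional` and `Computed`, omitted attributes are read back from the cluster rather than diffed against an empty value.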
