Skip to content

Commit 60b7d31

Browse files
authored
Merge pull request #675 from tencentcloudstack/feat/nodepool-asg-edit
feat: tke cluster node pool - extend auto scaling group partial params
2 parents 86ddf67 + 78ac93e commit 60b7d31

File tree

4 files changed

+168
-6
lines changed

4 files changed

+168
-6
lines changed

tencentcloud/resource_tc_kubernetes_node_pool.go

Lines changed: 107 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -410,6 +410,36 @@ func ResourceTencentCloudKubernetesNodePool() *schema.Resource {
410410
Default: "GENERAL",
411411
Description: "The image version of the node. Valid values are `DOCKER_CUSTOMIZE` and `GENERAL`. Default is `GENERAL`. This parameter will only affect new nodes, not including the existing nodes.",
412412
},
413+
// asg pass through arguments
414+
"scaling_group_name": {
415+
Type: schema.TypeString,
416+
Optional: true,
417+
Description: "Name of relative scaling group.",
418+
},
419+
"zones": {
420+
Type: schema.TypeList,
421+
Optional: true,
422+
Description: "List of auto scaling group available zones, for Basic network it is required.",
423+
Elem: &schema.Schema{Type: schema.TypeString},
424+
},
425+
"scaling_group_project_id": {
426+
Type: schema.TypeInt,
427+
Optional: true,
428+
Default: 0,
429+
Description: "Project ID the scaling group belongs to.",
430+
},
431+
"default_cooldown": {
432+
Type: schema.TypeInt,
433+
Optional: true,
434+
Description: "Seconds of scaling group cool down. Default value is `300`.",
435+
},
436+
"termination_policies": {
437+
Type: schema.TypeList,
438+
MaxItems: 1,
439+
Optional: true,
440+
Description: "Policy of scaling group termination. Available values: `[\"OLDEST_INSTANCE\"]`, `[\"NEWEST_INSTANCE\"]`.",
441+
Elem: &schema.Schema{Type: schema.TypeString},
442+
},
413443
//computed
414444
"status": {
415445
Type: schema.TypeString,
@@ -609,6 +639,7 @@ func resourceKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) er
609639
logId = getLogId(contextNil)
610640
ctx = context.WithValue(context.TODO(), logIdKey, logId)
611641
service = TkeService{client: meta.(*TencentCloudClient).apiV3Conn}
642+
asService = AsService{client: meta.(*TencentCloudClient).apiV3Conn}
612643
items = strings.Split(d.Id(), FILED_SP)
613644
)
614645
if len(items) != 2 {
@@ -678,6 +709,30 @@ func resourceKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) er
678709
}
679710
d.Set("labels", lables)
680711

712+
// Relative scaling group status
713+
asg, hasAsg, err := asService.DescribeAutoScalingGroupById(ctx, *nodePool.AutoscalingGroupId)
714+
if err != nil {
715+
err = resource.Retry(readRetryTimeout, func() *resource.RetryError {
716+
asg, hasAsg, err = asService.DescribeAutoScalingGroupById(ctx, *nodePool.AutoscalingGroupId)
717+
if err != nil {
718+
return retryError(err)
719+
}
720+
return nil
721+
})
722+
}
723+
724+
if err != nil {
725+
return nil
726+
}
727+
728+
if hasAsg >= 1 {
729+
_ = d.Set("scaling_group_name", asg.AutoScalingGroupName)
730+
_ = d.Set("zones", asg.ZoneSet)
731+
_ = d.Set("scaling_group_project_id", asg.ProjectId)
732+
_ = d.Set("default_cooldown", asg.DefaultCooldown)
733+
_ = d.Set("termination_policies", asg.TerminationPolicySet)
734+
}
735+
681736
taints := make([]map[string]interface{}, len(nodePool.Taints))
682737
for i, v := range nodePool.Taints {
683738
taint := map[string]interface{}{
@@ -797,6 +852,7 @@ func resourceKubernetesNodePoolUpdate(d *schema.ResourceData, meta interface{})
797852
logId = getLogId(contextNil)
798853
ctx = context.WithValue(context.TODO(), logIdKey, logId)
799854
service = TkeService{client: meta.(*TencentCloudClient).apiV3Conn}
855+
asService = AsService{client: meta.(*TencentCloudClient).apiV3Conn}
800856
items = strings.Split(d.Id(), FILED_SP)
801857
)
802858
if len(items) != 2 {
@@ -836,6 +892,57 @@ func resourceKubernetesNodePoolUpdate(d *schema.ResourceData, meta interface{})
836892
d.SetPartial("taints")
837893
}
838894

895+
if d.HasChange("scaling_group_name") ||
896+
d.HasChange("zones") ||
897+
d.HasChange("scaling_group_project_id") ||
898+
d.HasChange("default_cooldown") ||
899+
d.HasChange("termination_policies") {
900+
901+
nodePool, _, err := service.DescribeNodePool(ctx, clusterId, nodePoolId)
902+
if err != nil {
903+
return err
904+
}
905+
906+
var (
907+
scalingGroupId = *nodePool.AutoscalingGroupId
908+
name = d.Get("scaling_group_name").(string)
909+
projectId = d.Get("scaling_group_project_id").(int)
910+
defaultCooldown = d.Get("default_cooldown").(int)
911+
zones []*string
912+
terminationPolicy []*string
913+
)
914+
915+
if v, ok := d.GetOk("zones"); ok {
916+
for _, zone := range v.([]interface{}) {
917+
zones = append(zones, helper.String(zone.(string)))
918+
}
919+
}
920+
921+
if v, ok := d.GetOk("termination_policies"); ok {
922+
for _, policy := range v.([]interface{}) {
923+
terminationPolicy = append(terminationPolicy, helper.String(policy.(string)))
924+
}
925+
}
926+
927+
928+
err = resource.Retry(writeRetryTimeout, func() *resource.RetryError {
929+
errRet := asService.ModifyScalingGroup(ctx, scalingGroupId, name, projectId, defaultCooldown, zones, terminationPolicy)
930+
if errRet != nil {
931+
return retryError(errRet)
932+
}
933+
return nil
934+
})
935+
936+
if err != nil {
937+
return err
938+
}
939+
d.SetPartial("scaling_group_name")
940+
d.SetPartial("zones")
941+
d.SetPartial("scaling_group_project_id")
942+
d.SetPartial("default_cooldown")
943+
d.SetPartial("termination_policies")
944+
}
945+
839946
if d.HasChange("desired_capacity") {
840947
desiredCapacity := int64(d.Get("desired_capacity").(int))
841948
err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {

tencentcloud/resource_tc_kubernetes_node_pool_test.go

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,10 @@ func TestAccTencentCloudTkeNodePoolResource(t *testing.T) {
3434
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "desired_capacity", "1"),
3535
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "name", "mynodepool"),
3636
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "unschedulable", "0"),
37+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "scaling_group_name", "basic_group"),
38+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "default_cooldown", "400"),
39+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "termination_policies.#", "1"),
40+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "termination_policies.0", "OLDEST_INSTANCE"),
3741
),
3842
},
3943
{
@@ -50,6 +54,10 @@ func TestAccTencentCloudTkeNodePoolResource(t *testing.T) {
5054
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "name", "mynodepoolupdate"),
5155
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "node_os", "ubuntu18.04.1x86_64"),
5256
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "unschedulable", "1"),
57+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "scaling_group_name", "basic_group_test"),
58+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "default_cooldown", "350"),
59+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "termination_policies.#", "1"),
60+
resource.TestCheckResourceAttr(testTkeClusterNodePoolResourceKey, "termination_policies.0", "NEWEST_INSTANCE"),
5361
),
5462
},
5563
},
@@ -189,6 +197,9 @@ resource "tencentcloud_kubernetes_node_pool" "np_test" {
189197
retry_policy = "INCREMENTAL_INTERVALS"
190198
desired_capacity = 1
191199
enable_auto_scale = true
200+
scaling_group_name = "basic_group"
201+
default_cooldown = 400
202+
termination_policies = ["OLDEST_INSTANCE"]
192203
193204
auto_scaling_config {
194205
instance_type = var.default_instance_type
@@ -222,9 +233,9 @@ resource "tencentcloud_kubernetes_node_pool" "np_test" {
222233
}
223234
224235
node_config {
225-
extra_args = [
226-
"root-dir=/var/lib/kubelet"
227-
]
236+
extra_args = [
237+
"root-dir=/var/lib/kubelet"
238+
]
228239
}
229240
}
230241
@@ -243,6 +254,9 @@ resource "tencentcloud_kubernetes_node_pool" "np_test" {
243254
enable_auto_scale = false
244255
node_os = "ubuntu18.04.1x86_64"
245256
delete_keep_instance = true
257+
scaling_group_name = "basic_group_test"
258+
default_cooldown = 350
259+
termination_policies = ["NEWEST_INSTANCE"]
246260
247261
auto_scaling_config {
248262
instance_type = var.default_instance_type
@@ -270,9 +284,9 @@ resource "tencentcloud_kubernetes_node_pool" "np_test" {
270284
}
271285
272286
node_config {
273-
extra_args = [
274-
"root-dir=/var/lib/kubelet"
275-
]
287+
extra_args = [
288+
"root-dir=/var/lib/kubelet"
289+
]
276290
}
277291
}
278292
`

tencentcloud/service_tencentcloud_as.go

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -440,6 +440,42 @@ func (me *AsService) DescribeScheduledActionById(ctx context.Context, scheduledA
440440
return
441441
}
442442

443+
func (me *AsService) ModifyScalingGroup(ctx context.Context, id string, name string, projectId int, cooldown int, zones []*string, terminatePolicy []*string) error {
444+
logId := getLogId(ctx)
445+
request := as.NewModifyAutoScalingGroupRequest()
446+
447+
request.AutoScalingGroupId = helper.String(id)
448+
449+
if name != "" {
450+
request.AutoScalingGroupName = helper.String(name)
451+
}
452+
453+
if projectId != 0 {
454+
request.ProjectId = helper.IntUint64(projectId)
455+
}
456+
457+
if cooldown != 0 {
458+
request.DefaultCooldown = helper.IntUint64(cooldown)
459+
}
460+
461+
if len(zones) != 0 {
462+
request.Zones = zones
463+
}
464+
465+
if len(terminatePolicy) != 0 {
466+
request.TerminationPolicies = terminatePolicy[:1]
467+
}
468+
469+
ratelimit.Check(request.GetAction())
470+
_, err := me.client.UseAsClient().ModifyAutoScalingGroup(request)
471+
if err != nil {
472+
log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n",
473+
logId, request.GetAction(), request.ToJsonString(), err.Error())
474+
return err
475+
}
476+
return nil
477+
}
478+
443479
func (me *AsService) DeleteScheduledAction(ctx context.Context, scheduledActonId string) error {
444480
logId := getLogId(ctx)
445481
request := as.NewDeleteScheduledActionRequest()

website/docs/r/kubernetes_node_pool.html.markdown

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ The following arguments are supported:
112112
* `min_size` - (Required) Minimum number of node.
113113
* `name` - (Required, ForceNew) Name of the node pool. The name does not exceed 25 characters, and only supports Chinese, English, numbers, underscores, separators (`-`) and decimal points.
114114
* `vpc_id` - (Required, ForceNew) ID of VPC network.
115+
* `default_cooldown` - (Optional) Seconds of scaling group cool down. Default value is `300`.
115116
* `delete_keep_instance` - (Optional) Indicate to keep the CVM instance when delete the node pool. Default is `true`.
116117
* `desired_capacity` - (Optional) Desired capacity ot the node. If `enable_auto_scale` is set `true`, this will be a computed parameter.
117118
* `enable_auto_scale` - (Optional) Indicate whether to enable auto scaling or not.
@@ -120,10 +121,14 @@ The following arguments are supported:
120121
* `node_os_type` - (Optional) The image version of the node. Valid values are `DOCKER_CUSTOMIZE` and `GENERAL`. Default is `GENERAL`. This parameter will only affect new nodes, not including the existing nodes.
121122
* `node_os` - (Optional) Operating system of the cluster, the available values include: `tlinux2.4x86_64`, `ubuntu18.04.1x86_64`, `ubuntu16.04.1 LTSx86_64`, `centos7.6.0_x64` and `centos7.2x86_64`. Default is 'tlinux2.4x86_64'. This parameter will only affect new nodes, not including the existing nodes.
122123
* `retry_policy` - (Optional, ForceNew) Available values for retry policies include `IMMEDIATE_RETRY` and `INCREMENTAL_INTERVALS`.
124+
* `scaling_group_name` - (Optional) Name of relative scaling group.
125+
* `scaling_group_project_id` - (Optional) Project ID the scaling group belongs to.
123126
* `scaling_mode` - (Optional, ForceNew) Auto scaling mode. Valid values are `CLASSIC_SCALING`(scaling by create/destroy instances), `WAKE_UP_STOPPED_SCALING`(Boot priority for expansion. When expanding the capacity, the shutdown operation is given priority to the shutdown of the instance. If the number of instances is still lower than the expected number of instances after the startup, the instance will be created, and the method of destroying the instance will still be used for shrinking).
124127
* `subnet_ids` - (Optional, ForceNew) ID list of subnet, and for VPC it is required.
125128
* `taints` - (Optional) Taints of kubernetes node pool created nodes.
129+
* `termination_policies` - (Optional) Policy of scaling group termination. Available values: `["OLDEST_INSTANCE"]`, `["NEWEST_INSTANCE"]`.
126130
* `unschedulable` - (Optional, ForceNew) Sets whether the joining node participates in the schedule. Default is `0`, meaning the node participates in scheduling.
131+
* `zones` - (Optional) List of auto scaling group available zones, for Basic network it is required.
127132

128133
The `auto_scaling_config` object supports the following:
129134

0 commit comments

Comments
 (0)