Skip to content

Commit 1023cb5

Browse files
authored
scale_worker add advancedInstanceSetting (#587)
1 parent 4920875 commit 1023cb5

10 files changed

+216
-49
lines changed

CHANGELOG.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,16 @@
11
## 1.53.4 (Unreleased)
2+
3+
ENHANCEMENTS:
4+
5+
* Resource `tencentcloud_kubernetes_scale_worker` add `data_disk`, `docker_graph_path` to support advanced instance settings.
6+
* Resource `tencentcloud_instance` add tags to the disks created with the instance.
7+
8+
BUG FIXES:
9+
10+
* Resource: `tencentcloud_kubernetes_cluster_attachment` fix bug that only one extra argument was set successfully.
11+
* Resource: `tencentcloud_as_scaling_policy` fix bug that a "missing required parameters" error happened when updating metric parameters.
12+
13+
214
## 1.53.3 (February 02, 2021)
315

416
ENHANCEMENTS:

tencentcloud/resource_tc_as_scaling_policy.go

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -223,16 +223,14 @@ func resourceTencentCloudAsScalingPolicyUpdate(d *schema.ResourceData, meta inte
223223
request.AdjustmentValue = &adjustmentValue
224224
}
225225
request.MetricAlarm = &as.MetricAlarm{}
226-
if d.HasChange("comparison_operator") || d.HasChange("threshold") || d.HasChange("metric_name") || d.HasChange("period") ||
227-
d.HasChange("continuous_time") || d.HasChange("statistic") {
228-
//these two parameter must pass together
229-
request.MetricAlarm.ComparisonOperator = helper.String(d.Get("comparison_operator").(string))
230-
request.MetricAlarm.Threshold = helper.IntUint64(d.Get("threshold").(int))
231-
request.MetricAlarm.MetricName = helper.String(d.Get("metric_name").(string))
232-
request.MetricAlarm.Period = helper.IntUint64(d.Get("period").(int))
233-
request.MetricAlarm.ContinuousTime = helper.IntUint64(d.Get("continuous_time").(int))
234-
request.MetricAlarm.Statistic = helper.String(d.Get("statistic").(string))
235-
}
226+
227+
//these two parameter must pass together
228+
request.MetricAlarm.ComparisonOperator = helper.String(d.Get("comparison_operator").(string))
229+
request.MetricAlarm.Threshold = helper.IntUint64(d.Get("threshold").(int))
230+
request.MetricAlarm.MetricName = helper.String(d.Get("metric_name").(string))
231+
request.MetricAlarm.Period = helper.IntUint64(d.Get("period").(int))
232+
request.MetricAlarm.ContinuousTime = helper.IntUint64(d.Get("continuous_time").(int))
233+
request.MetricAlarm.Statistic = helper.String(d.Get("statistic").(string))
236234

237235
if d.HasChange("cooldown") {
238236
request.Cooldown = helper.IntUint64(d.Get("cooldown").(int))

tencentcloud/resource_tc_container_cluster_instance.go

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ import (
3939
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
4040
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
4141
cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
42+
tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
4243
"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
4344
)
4445

@@ -323,7 +324,7 @@ func resourceTencentCloudContainerClusterInstancesCreate(d *schema.ResourceData,
323324
runInstancesPara.Placement = &place
324325
runInstancesPara.InstanceCount = common.Int64Ptr(1)
325326

326-
var iAdvanced InstanceAdvancedSettings
327+
var iAdvanced tke.InstanceAdvancedSettings
327328
var cvms RunInstancesForNode
328329

329330
if v, ok := d.GetOkExists("vpc_id"); ok {
@@ -445,19 +446,19 @@ func resourceTencentCloudContainerClusterInstancesCreate(d *schema.ResourceData,
445446
}
446447

447448
if v, ok := d.GetOkExists("mount_target"); ok {
448-
iAdvanced.MountTarget = v.(string)
449+
iAdvanced.MountTarget = helper.String(v.(string))
449450
}
450451

451452
if v, ok := d.GetOkExists("docker_graph_path"); ok {
452-
iAdvanced.DockerGraphPath = v.(string)
453+
iAdvanced.DockerGraphPath = helper.String(v.(string))
453454
}
454455

455456
if v, ok := d.GetOkExists("user_script"); ok {
456-
iAdvanced.UserScript = v.(string)
457+
iAdvanced.UserScript = helper.String(v.(string))
457458
}
458459

459460
if v, ok := d.GetOkExists("unschedulable"); ok {
460-
iAdvanced.Unschedulable = int64(v.(int))
461+
iAdvanced.Unschedulable = helper.IntInt64(v.(int))
461462
}
462463

463464
runInstancesParas := runInstancesPara.ToJsonString()

tencentcloud/resource_tc_cynosdb_cluster.go

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ import (
6060

6161
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
6262
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
63+
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
6364
cynosdb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cynosdb/v20190107"
6465
"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
6566
"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/ratelimit"
@@ -133,6 +134,11 @@ func resourceTencentCloudCynosdbClusterCreate(d *schema.ResourceData, meta inter
133134
ratelimit.Check(request.GetAction())
134135
response, err = meta.(*TencentCloudClient).apiV3Conn.UseCynosdbClient().CreateClusters(request)
135136
if err != nil {
137+
if e, ok := err.(*errors.TencentCloudSDKError); ok {
138+
if e.GetCode() == "InvalidParameterValue.DealNameNotFound" {
139+
return resource.RetryableError(fmt.Errorf("waiting billing status, retry..."))
140+
}
141+
}
136142
log.Printf("[CRITAL]%s api[%s] fail, reason:%s", logId, request.GetAction(), err.Error())
137143
return retryError(err)
138144
}
@@ -141,11 +147,33 @@ func resourceTencentCloudCynosdbClusterCreate(d *schema.ResourceData, meta inter
141147
if err != nil {
142148
return err
143149
}
144-
if response != nil && response.Response != nil && len(response.Response.ClusterIds) != 1 {
150+
if response != nil && response.Response != nil && len(response.Response.DealNames) != 1 {
145151
return fmt.Errorf("cynosdb cluster id count isn't 1")
146152
}
147-
d.SetId(*response.Response.ClusterIds[0])
148-
id := d.Id()
153+
//after 1.53.3 the response is async
154+
dealName := response.Response.DealNames[0]
155+
dealReq := cynosdb.NewDescribeResourcesByDealNameRequest()
156+
dealRes := cynosdb.NewDescribeResourcesByDealNameResponse()
157+
dealReq.DealName = dealName
158+
err = resource.Retry(readRetryTimeout, func() *resource.RetryError {
159+
ratelimit.Check(request.GetAction())
160+
dealRes, err = meta.(*TencentCloudClient).apiV3Conn.UseCynosdbClient().DescribeResourcesByDealName(dealReq)
161+
if err != nil {
162+
log.Printf("[CRITAL]%s api[%s] fail, reason:%s", logId, request.GetAction(), err.Error())
163+
return retryError(err)
164+
}
165+
return nil
166+
})
167+
if err != nil {
168+
return err
169+
}
170+
171+
if dealRes != nil && dealRes.Response != nil && len(dealRes.Response.BillingResourceInfos) != 1 {
172+
return fmt.Errorf("cynosdb cluster id count isn't 1")
173+
}
174+
175+
id := *dealRes.Response.BillingResourceInfos[0].ClusterId
176+
d.SetId(id)
149177

150178
_, _, has, err := cynosdbService.DescribeClusterById(ctx, id)
151179
if err != nil {

tencentcloud/resource_tc_instance.go

Lines changed: 63 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -435,6 +435,7 @@ func resourceTencentCloudInstanceCreate(d *schema.ResourceData, meta interface{}
435435
if v, ok := d.GetOk("hostname"); ok {
436436
request.HostName = helper.String(v.(string))
437437
}
438+
438439
if v, ok := d.GetOk("instance_charge_type"); ok {
439440
instanceChargeType := v.(string)
440441
request.InstanceChargeType = &instanceChargeType
@@ -617,13 +618,27 @@ func resourceTencentCloudInstanceCreate(d *schema.ResourceData, meta interface{}
617618
d.SetId(instanceId)
618619

619620
// wait for status
621+
//get system disk ID and data disk ID
622+
var systemDiskId string
623+
var dataDiskIds []string
620624
err = resource.Retry(5*readRetryTimeout, func() *resource.RetryError {
621625
instance, errRet := cvmService.DescribeInstanceById(ctx, instanceId)
622626
if errRet != nil {
623627
return retryError(errRet, InternalError)
624628
}
625629
if instance != nil && (*instance.InstanceState == CVM_STATUS_RUNNING ||
626630
*instance.InstanceState == CVM_STATUS_LAUNCH_FAILED) {
631+
//get system disk ID
632+
if instance.SystemDisk != nil && instance.SystemDisk.DiskId != nil {
633+
systemDiskId = *instance.SystemDisk.DiskId
634+
}
635+
if instance.DataDisks != nil {
636+
for _, dataDisk := range instance.DataDisks {
637+
if dataDisk != nil && dataDisk.DiskId != nil {
638+
dataDiskIds = append(dataDiskIds, *dataDisk.DiskId)
639+
}
640+
}
641+
}
627642
return nil
628643
}
629644
return resource.RetryableError(fmt.Errorf("cvm instance status is %s, retry...", *instance.InstanceState))
@@ -642,6 +657,27 @@ func resourceTencentCloudInstanceCreate(d *schema.ResourceData, meta interface{}
642657
// If tags attachment failed, the user will be notified, then plan/apply/update with terraform.
643658
return err
644659
}
660+
661+
//except instance ,system disk and data disk will be tagged
662+
//keep logical consistence with the console
663+
//tag system disk
664+
if systemDiskId != "" {
665+
resourceName = BuildTagResourceName("cvm", "volume", tcClient.Region, systemDiskId)
666+
if err := tagService.ModifyTags(ctx, resourceName, tags, nil); err != nil {
667+
// If tags attachment failed, the user will be notified, then plan/apply/update with terraform.
668+
return err
669+
}
670+
}
671+
//tag disk ids
672+
for _, diskId := range dataDiskIds {
673+
if diskId != "" {
674+
resourceName = BuildTagResourceName("cvm", "volume", tcClient.Region, diskId)
675+
if err := tagService.ModifyTags(ctx, resourceName, tags, nil); err != nil {
676+
// If tags attachment failed, the user will be notified, then plan/apply/update with terraform.
677+
return err
678+
}
679+
}
680+
}
645681
}
646682

647683
if !(d.Get("running_flag").(bool)) {
@@ -1014,9 +1050,11 @@ func resourceTencentCloudInstanceUpdate(d *schema.ResourceData, meta interface{}
10141050
old, new := d.GetChange("key_name")
10151051
oldKeyId := old.(string)
10161052
keyId := new.(string)
1017-
err := cvmService.UnbindKeyPair(ctx, oldKeyId, []*string{&instanceId})
1018-
if err != nil {
1019-
return err
1053+
if oldKeyId != "" {
1054+
err := cvmService.UnbindKeyPair(ctx, oldKeyId, []*string{&instanceId})
1055+
if err != nil {
1056+
return err
1057+
}
10201058
}
10211059
err = resource.Retry(2*readRetryTimeout, func() *resource.RetryError {
10221060
instance, errRet := cvmService.DescribeInstanceById(ctx, instanceId)
@@ -1086,6 +1124,28 @@ func resourceTencentCloudInstanceUpdate(d *schema.ResourceData, meta interface{}
10861124
if err != nil {
10871125
return err
10881126
}
1127+
//except instance ,system disk and data disk will be tagged
1128+
//keep logical consistence with the console
1129+
//tag system disk
1130+
if systemDiskId, ok := d.GetOk("system_disk_id"); ok {
1131+
if systemDiskId.(string) != "" {
1132+
resourceName = BuildTagResourceName("cvm", "volume", region, systemDiskId.(string))
1133+
if err := tagService.ModifyTags(ctx, resourceName, replaceTags, deleteTags); err != nil {
1134+
return err
1135+
}
1136+
}
1137+
}
1138+
//tag disk ids
1139+
if dataDisks, ok := d.GetOk("date_disk"); ok {
1140+
dataDiskList := dataDisks.([]map[string]interface{})
1141+
for _, disk := range dataDiskList {
1142+
dataDiskId := disk["data_disk_id"].(string)
1143+
resourceName = BuildTagResourceName("cvm", "volume", region, dataDiskId)
1144+
if err := tagService.ModifyTags(ctx, resourceName, replaceTags, deleteTags); err != nil {
1145+
return err
1146+
}
1147+
}
1148+
}
10891149
d.SetPartial("tags")
10901150
}
10911151

tencentcloud/resource_tc_kubernetes_cluster.go

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -810,6 +810,7 @@ func resourceTencentCloudTkeCluster() *schema.Resource {
810810
},
811811
Description: "An information list of cvm within the 'WORKER' clusters. Each element contains the following attributes:",
812812
},
813+
//advanced instance setting
813814
"labels": {
814815
Type: schema.TypeMap,
815816
Optional: true,
@@ -843,6 +844,7 @@ func resourceTencentCloudTkeCluster() *schema.Resource {
843844
Elem: &schema.Schema{Type: schema.TypeString},
844845
Description: "Custom parameter information related to the node.",
845846
},
847+
846848
"kube_config": {
847849
Type: schema.TypeString,
848850
Computed: true,
@@ -1264,8 +1266,8 @@ func resourceTencentCloudTkeClusterCreate(d *schema.ResourceData, meta interface
12641266

12651267
if temp, ok := d.GetOk("extra_args"); ok {
12661268
extraArgs := helper.InterfacesStrings(temp.([]interface{}))
1267-
for _, extraArg := range extraArgs {
1268-
iAdvanced.ExtraArgs.Kubelet = append(iAdvanced.ExtraArgs.Kubelet, &extraArg)
1269+
for i := range extraArgs {
1270+
iAdvanced.ExtraArgs.Kubelet = append(iAdvanced.ExtraArgs.Kubelet, &extraArgs[i])
12691271
}
12701272
}
12711273

tencentcloud/resource_tc_kubernetes_cluster_attachment.go

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -295,8 +295,9 @@ func tkeGetInstanceAdvancedPara(dMap map[string]interface{}, meta interface{}) (
295295
setting.DataDisks = append(setting.DataDisks, &dataDisk)
296296
}
297297
}
298-
299-
setting.Unschedulable = helper.BoolToInt64Ptr(!dMap["is_schedule"].(bool))
298+
if v, ok := dMap["is_schedule"]; ok {
299+
setting.Unschedulable = helper.BoolToInt64Ptr(!v.(bool))
300+
}
300301

301302
if v, ok := dMap["user_data"]; ok {
302303
setting.UserScript = helper.String(v.(string))
@@ -310,8 +311,8 @@ func tkeGetInstanceAdvancedPara(dMap map[string]interface{}, meta interface{}) (
310311
extraArgs := helper.InterfacesStrings(temp.([]interface{}))
311312
clusterExtraArgs := tke.InstanceExtraArgs{}
312313
clusterExtraArgs.Kubelet = make([]*string, 0)
313-
for _, extraArg := range extraArgs {
314-
clusterExtraArgs.Kubelet = append(clusterExtraArgs.Kubelet, &extraArg)
314+
for i := range extraArgs {
315+
clusterExtraArgs.Kubelet = append(clusterExtraArgs.Kubelet, &extraArgs[i])
315316
}
316317
setting.ExtraArgs = &clusterExtraArgs
317318
}

0 commit comments

Comments
 (0)