Skip to content

Commit 3cf6caf

Browse files
Qingping Hou (houqp)
authored and committed
add github action to format and lint code
1 parent ef2e5c6 commit 3cf6caf

File tree

5 files changed

+227
-214
lines changed

5 files changed

+227
-214
lines changed

.github/workflows/build.yml

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
name: 'CI'
2+
on:
3+
push:
4+
branches: [ main ]
5+
tags: [ '*' ]
6+
pull_request:
7+
branches: [ main ]
8+
jobs:
9+
terraform:
10+
name: 'Terraform'
11+
runs-on: ubuntu-latest
12+
steps:
13+
- name: 'Checkout'
14+
uses: actions/checkout@v2
15+
16+
- uses: hashicorp/setup-terraform@v1
17+
with:
18+
terraform_version: 0.12.25
19+
- run: terraform fmt
20+
- run: terraform init
21+
22+
- name: tflint
23+
uses: reviewdog/[email protected]
24+
with:
25+
github_token: ${{ secrets.github_token }}
26+
python:
27+
name: 'Python'
28+
runs-on: ubuntu-latest
29+
steps:
30+
- name: 'Checkout'
31+
uses: actions/checkout@v2
32+
- name: Set up Python environment
33+
uses: actions/setup-python@v1
34+
with:
35+
python-version: "3.8"
36+
- run: pip install flake8 mypy black
37+
- name: black format
38+
run: black --check lambdas
39+
- name: flake8
40+
run: flake8 --ignore E501
41+
- name: mypy static analysis
42+
run: mypy --ignore-missing-imports --follow-imports skip ./lambdas

lambdas/checkNodesForRunningPods.py

Lines changed: 69 additions & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -14,47 +14,35 @@
1414
logger = logging.getLogger(__name__)
1515
logger.setLevel(logging.DEBUG)
1616
# Configure your cluster name and region here
17-
KUBE_FILEPATH = '/tmp/kubeconfig'
17+
KUBE_FILEPATH = "/tmp/kubeconfig"
1818
MIRROR_POD_ANNOTATION_KEY = "kubernetes.io/config.mirror"
1919
CONTROLLER_KIND_DAEMON_SET = "DaemonSet"
2020

21+
2122
def create_kube_config(eks, cluster_name):
2223
"""Creates the Kubernetes config file required when instantiating the API client."""
23-
cluster_info = eks.describe_cluster(name=cluster_name)['cluster']
24-
certificate = cluster_info['certificateAuthority']['data']
25-
endpoint = cluster_info['endpoint']
24+
cluster_info = eks.describe_cluster(name=cluster_name)["cluster"]
25+
certificate = cluster_info["certificateAuthority"]["data"]
26+
endpoint = cluster_info["endpoint"]
2627

2728
kube_config = {
28-
'apiVersion': 'v1',
29-
'clusters': [
30-
{
31-
'cluster':
32-
{
33-
'server': endpoint,
34-
'certificate-authority-data': certificate
35-
},
36-
'name': 'k8s'
37-
38-
}],
39-
'contexts': [
29+
"apiVersion": "v1",
30+
"clusters": [
4031
{
41-
'context':
42-
{
43-
'cluster': 'k8s',
44-
'user': 'aws'
45-
},
46-
'name': 'aws'
47-
}],
48-
'current-context': 'aws',
49-
'Kind': 'config',
50-
'users': [
51-
{
52-
'name': 'aws',
53-
'user': 'lambda'
54-
}]
32+
"cluster": {
33+
"server": endpoint,
34+
"certificate-authority-data": certificate,
35+
},
36+
"name": "k8s",
37+
}
38+
],
39+
"contexts": [{"context": {"cluster": "k8s", "user": "aws"}, "name": "aws"}],
40+
"current-context": "aws",
41+
"Kind": "config",
42+
"users": [{"name": "aws", "user": "lambda"}],
5543
}
5644

57-
with open(KUBE_FILEPATH, 'w') as kube_file_content:
45+
with open(KUBE_FILEPATH, "w") as kube_file_content:
5846
yaml.dump(kube_config, kube_file_content, default_flow_style=False)
5947

6048

@@ -68,81 +56,87 @@ def get_bearer_token(cluster, region):
6856
STS_TOKEN_EXPIRES_IN = 60
6957
session = boto3.session.Session()
7058

71-
client = session.client('sts', region_name=region)
59+
client = session.client("sts", region_name=region)
7260
service_id = client.meta.service_model.service_id
7361

7462
signer = RequestSigner(
75-
service_id,
76-
region,
77-
'sts',
78-
'v4',
79-
session.get_credentials(),
80-
session.events
63+
service_id, region, "sts", "v4", session.get_credentials(), session.events
8164
)
8265

8366
params = {
84-
'method': 'GET',
85-
'url': 'https://sts.{}.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15'.format(region),
86-
'body': {},
87-
'headers': {
88-
'x-k8s-aws-id': cluster
89-
},
90-
'context': {}
67+
"method": "GET",
68+
"url": "https://sts.{}.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15".format(
69+
region
70+
),
71+
"body": {},
72+
"headers": {"x-k8s-aws-id": cluster},
73+
"context": {},
9174
}
9275

9376
signed_url = signer.generate_presigned_url(
94-
params,
95-
region_name=region,
96-
expires_in=STS_TOKEN_EXPIRES_IN,
97-
operation_name=''
77+
params, region_name=region, expires_in=STS_TOKEN_EXPIRES_IN, operation_name=""
9878
)
9979

100-
base64_url = base64.urlsafe_b64encode(signed_url.encode('utf-8')).decode('utf-8')
80+
base64_url = base64.urlsafe_b64encode(signed_url.encode("utf-8")).decode("utf-8")
10181

10282
# need to remove base64 encoding padding:
10383
# https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/202
104-
return 'k8s-aws-v1.' + re.sub(r'=*', '', base64_url)
84+
return "k8s-aws-v1." + re.sub(r"=*", "", base64_url)
85+
10586

106-
def get_evictable_pods(api, node_name,label_selector):
107-
'''
87+
def get_evictable_pods(api, node_name, label_selector):
88+
"""
10889
This method will ensure we are only waiting for pods that matters based on
10990
label_selector
110-
'''
111-
field_selector = 'spec.nodeName=' + node_name
112-
pods = api.list_pod_for_all_namespaces(watch=False, field_selector=field_selector,
113-
label_selector = label_selector, include_uninitialized=True)
91+
"""
92+
field_selector = "spec.nodeName=" + node_name
93+
pods = api.list_pod_for_all_namespaces(
94+
watch=False,
95+
field_selector=field_selector,
96+
label_selector=label_selector,
97+
include_uninitialized=True,
98+
)
11499
return [pod for pod in pods.items]
115100

116-
def count_running_pods(api, node_name,label_selector):
117-
'''
101+
102+
def count_running_pods(api, node_name, label_selector):
103+
"""
118104
Report count for total running pods based on the label
119-
'''
120-
pods = get_evictable_pods(api, node_name,label_selector)
105+
"""
106+
pods = get_evictable_pods(api, node_name, label_selector)
121107
return len(pods)
122108

109+
123110
def handler(event, context):
124-
'''
111+
"""
125112
Lambda handler, this function will call the
126113
private functions to get the running pod count based on the label selector provided
127-
'''
128-
eks = boto3.client('eks', region_name=event['region'])
129-
#loading Kube Config
114+
"""
115+
eks = boto3.client("eks", region_name=event["region"])
116+
# loading Kube Config
130117
if not os.path.exists(KUBE_FILEPATH):
131-
create_kube_config(eks, event['cluster_name'])
118+
create_kube_config(eks, event["cluster_name"])
132119
k8s.config.load_kube_config(KUBE_FILEPATH)
133120
configuration = k8s.client.Configuration()
134-
#getting the auth token
135-
token = get_bearer_token(event['cluster_name'],event['region'])
136-
configuration.api_key['authorization'] = token
137-
configuration.api_key_prefix['authorization'] = 'Bearer'
121+
# getting the auth token
122+
token = get_bearer_token(event["cluster_name"], event["region"])
123+
configuration.api_key["authorization"] = token
124+
configuration.api_key_prefix["authorization"] = "Bearer"
138125
# API
139126
api = k8s.client.ApiClient(configuration)
140127
core_v1_api = k8s.client.CoreV1Api(api)
141128

142129
# Get all the pods
143-
running_pod_count=count_running_pods(core_v1_api,node_name=event['node_name'],
144-
label_selector=event['label_selector'])
145-
output_json = {"region": event['region'], "node_name" : event['node_name'] ,
146-
"instance_id" : event['instance_id'], "cluster_name": event['cluster_name'],
147-
"activePodCount": running_pod_count}
130+
running_pod_count = count_running_pods(
131+
core_v1_api,
132+
node_name=event["node_name"],
133+
label_selector=event["label_selector"],
134+
)
135+
output_json = {
136+
"region": event["region"],
137+
"node_name": event["node_name"],
138+
"instance_id": event["instance_id"],
139+
"cluster_name": event["cluster_name"],
140+
"activePodCount": running_pod_count,
141+
}
148142
return output_json

lambdas/detachAndTerminateNode.py

Lines changed: 28 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -4,88 +4,81 @@
44
import time
55
import boto3
66

7-
ec2_client = boto3.client('ec2')
8-
asg_client = boto3.client('autoscaling')
7+
ec2_client = boto3.client("ec2")
8+
asg_client = boto3.client("autoscaling")
99

1010

1111
def lambda_handler(event, context):
12-
''' The base lambda handler function
12+
"""The base lambda handler function
1313
This function, get the instance id, check for ASG tag
1414
put it back in Inservice state
1515
and detach it from the corresponding ASG
16-
'''
17-
instance_id = event['instance_id']
16+
"""
17+
instance_id = event["instance_id"]
1818
# Capture all the info about the instance so we can extract the ASG name later
1919
response = ec2_client.describe_instances(
2020
Filters=[
21-
{
22-
'Name': 'instance-id',
23-
'Values': [instance_id]
24-
},
21+
{"Name": "instance-id", "Values": [instance_id]},
2522
],
2623
)
2724

2825
# Get the ASG name from the response JSON
29-
tags = response['Reservations'][0]['Instances'][0]['Tags']
30-
autoscaling_name = next(t["Value"] for t in tags if t["Key"] == "aws:autoscaling:groupName")
26+
tags = response["Reservations"][0]["Instances"][0]["Tags"]
27+
autoscaling_name = next(
28+
t["Value"] for t in tags if t["Key"] == "aws:autoscaling:groupName"
29+
)
3130

32-
#Put the instance in standby
31+
# Put the instance in standby
3332
response = asg_client.exit_standby(
3433
InstanceIds=[
3534
instance_id,
3635
],
37-
AutoScalingGroupName=autoscaling_name
36+
AutoScalingGroupName=autoscaling_name,
3837
)
3938

4039
response = asg_client.describe_auto_scaling_instances(
4140
InstanceIds=[
4241
instance_id,
4342
]
4443
)
45-
while response['AutoScalingInstances'][0]['LifecycleState']!='InService':
44+
while response["AutoScalingInstances"][0]["LifecycleState"] != "InService":
4645
print(" The node is not yet in service state, waiting for 5 more seconds")
4746
time.sleep(5)
4847
response = asg_client.describe_auto_scaling_instances(
49-
InstanceIds=[
50-
instance_id,
51-
]
52-
)
53-
if response['AutoScalingInstances'][0]['LifecycleState']=='InService':
48+
InstanceIds=[
49+
instance_id,
50+
]
51+
)
52+
if response["AutoScalingInstances"][0]["LifecycleState"] == "InService":
5453
break
55-
# Detach the instance
54+
# Detach the instance
5655
response = asg_client.detach_instances(
5756
InstanceIds=[
5857
instance_id,
5958
],
6059
AutoScalingGroupName=autoscaling_name,
61-
ShouldDecrementDesiredCapacity=True
60+
ShouldDecrementDesiredCapacity=True,
6261
)
6362

6463
response = ec2_client.describe_instances(
6564
Filters=[
66-
{
67-
'Name': 'instance-id',
68-
'Values': [instance_id]
69-
},
65+
{"Name": "instance-id", "Values": [instance_id]},
7066
],
7167
)
7268

73-
while response['Reservations'][0]['Instances'][0]['Tags']==autoscaling_name:
69+
while response["Reservations"][0]["Instances"][0]["Tags"] == autoscaling_name:
7470
# sleep added to reduce the number of api calls for checking the status
7571
print(" The node is not yet detached, waiting for 10 more seconds")
7672
time.sleep(10)
7773
response = ec2_client.describe_instances(
78-
Filters=[
79-
{
80-
'Name': 'instance-id',
81-
'Values': [instance_id]
82-
},
83-
],
84-
)
85-
if response['Reservations'][0]['Instances'][0]['Tags']!=autoscaling_name:
74+
Filters=[
75+
{"Name": "instance-id", "Values": [instance_id]},
76+
],
77+
)
78+
if response["Reservations"][0]["Instances"][0]["Tags"] != autoscaling_name:
8679
break
8780

88-
#if the node is detached then stop the instance
81+
# if the node is detached then stop the instance
8982

9083
response = ec2_client.stop_instances(
9184
InstanceIds=[

0 commit comments

Comments
 (0)