This isn't an issue, but I wanted to thank you for providing this. I look forward to converting our existing code to this workflow when I next get the chance. To accomplish this currently, we resorted to the following type of workflow:
jobs:
  deploy:
    runs-on: ubuntu-latest
    permissions:
      contents: read  # Default read
      id-token: write # Required for OIDC token creation
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4
      - name: Check Python version
        run: python --version
      - name: Assume AWS Role and Configure AWS CLI
        uses: aws-actions/configure-aws-credentials@7474bc4690e29a8392af63c5b98e7449536d5c3a #v4
        with:
          role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
          aws-region: ${{ vars.AWS_REGION }}
      - name: Package and deploy Lambda functions
        env:
          ENVIRONMENT: ${{ github.event.inputs.environment || (startsWith(github.ref, 'refs/tags/prod-') && 'production') || 'development' }}
          S3_BUCKET: ${{ vars.DEPLOY_BUCKET }}
          LAMBDA_ROLE: ${{ secrets.AWS_LAMBDA_ROLE }}
          LAMBDA_TIMEOUT: 15
          LAMBDA_MEMORY_SIZE: 128
          LAMBDA_RUNTIME: python3.10
          LAMBDA_HANDLER: lambda_function.lambda_handler
        run: |
          set -e # Exit immediately if a command exits with a non-zero status
          set -x # Print commands and their arguments as they are executed

          # Clean up deployment location
          echo "Cleaning up S3 environment directory s3://${{ env.S3_BUCKET }}/my-lambda/${{ env.ENVIRONMENT }}/"
          aws s3 rm s3://${{ env.S3_BUCKET }}/my-lambda/${{ env.ENVIRONMENT }}/ --recursive

          function_name=my-lambda-${{ env.ENVIRONMENT }}
          zip_file=${function_name}.zip
          temp_dir=$(mktemp -d)

          # Copy only necessary files and directories
          cp lambda/lambda_function.py $temp_dir/
          cp -r lambda/tests $temp_dir/tests
          cp -r lambda/utils $temp_dir/utils

          # Install dependencies
          if [ -f "lambda/requirements.txt" ]; then
            pip install -r lambda/requirements.txt -t $temp_dir
          fi

          cd $temp_dir && zip -rq ../$zip_file . && cd ..
          aws s3 cp --no-progress $zip_file s3://${S3_BUCKET}/my-lambda/${{ env.ENVIRONMENT }}/${zip_file}
          cd $GITHUB_WORKSPACE
          rm -rf $temp_dir

          s3_key=my-lambda/${{ env.ENVIRONMENT }}/${zip_file}

          check_lambda_update_status() {
            while true; do
              state=$(aws lambda get-function-configuration --function-name $function_name --query 'State' --output text)
              last_update_status=$(aws lambda get-function-configuration --function-name $function_name --query 'LastUpdateStatus' --output text)
              if [ "$state" == "Active" ] && [ "$last_update_status" == "Successful" ]; then
                echo "Lambda function is active and last update was successful. Proceeding with update."
                break
              else
                echo "Lambda function is in $state state and last update status is $last_update_status. Waiting for it to become active and successful."
                sleep 10
              fi
            done
          }
          # Update the existing function's code; if that fails, create the function instead
          if ! aws lambda update-function-code --function-name $function_name --s3-bucket $S3_BUCKET --s3-key $s3_key; then
            echo "Creating new Lambda function $function_name"
            aws lambda create-function --function-name $function_name --runtime ${{ env.LAMBDA_RUNTIME }} --role ${{ env.LAMBDA_ROLE }} --handler ${{ env.LAMBDA_HANDLER }} --code S3Bucket=${{ env.S3_BUCKET }},S3Key=$s3_key --timeout ${{ env.LAMBDA_TIMEOUT }} --memory-size ${{ env.LAMBDA_MEMORY_SIZE }} --environment Variables="{ENVIRONMENT=${{ env.ENVIRONMENT }}}"
          else
            echo "Updated existing Lambda function $function_name"
            check_lambda_update_status
            aws lambda update-function-configuration --function-name $function_name --timeout ${{ env.LAMBDA_TIMEOUT }} --memory-size ${{ env.LAMBDA_MEMORY_SIZE }}
          fi
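As an aside, the hand-rolled check_lambda_update_status polling loop could likely be replaced with the AWS CLI's built-in Lambda waiters. A rough, untested sketch of that step, assuming AWS CLI v2 on the runner (which provides the function-active-v2 and function-updated-v2 waiters):

          # Hypothetical replacement for check_lambda_update_status: block until the
          # function is Active and its last update has finished.
          aws lambda wait function-active-v2 --function-name "$function_name"
          aws lambda wait function-updated-v2 --function-name "$function_name"

The waiters poll the function's status for you and give up after a bounded number of attempts, so a stuck or failed update surfaces as a non-zero exit instead of looping forever.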