forked from patternhelloworld/docker-blue-green-runner
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun.sh
290 lines (222 loc) · 12.6 KB
/
run.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
#!/bin/bash

# Normalize this script itself first: strip trailing CR characters so a CRLF
# (Windows) checkout does not break the interpreter. Quote "$(basename "$0")"
# so a script path containing spaces cannot word-split (ShellCheck SC2086).
sudo sed -i -e "s/\r$//g" "$(basename "$0")" || sed -i -e "s/\r$//g" "$(basename "$0")"

# Fail fast: abort on any unhandled error (-e) and on unset variables (-u).
set -eu

# Make every helper script in this directory executable.
sudo chmod a+x *.sh

echo "[NOTICE] Substituting CRLF with LF to prevent possible CRLF errors..."
sudo bash prevent-crlf.sh

# Keep Git from fighting the line-ending / permission normalization above.
git config apply.whitespace nowarn
git config core.filemode false

# Give prevent-crlf.sh a moment to settle before sourcing the helpers below.
sleep 3

# Shared helper libraries (must live next to this script).
source ./util.sh
source ./use-app.sh
source ./use-nginx.sh
source ./use-consul.sh
# Back-up priority : new > blue or green > latest
backup_app_to_previous_images(){

  # App image backup chain: rotate the existing 'previous' tag to 'previous2',
  # then promote the best available candidate (new > current state > latest)
  # to become the fresh 'previous' tag.
  echo "[NOTICE] Docker tag 'previous' 'previous2'"
  docker tag ${project_name}:previous ${project_name}:previous2 || echo "[NOTICE] the 'previous' image does NOT exist."

  # First choice: the 'new' image, if it exists.
  if [[ $(docker images -q ${project_name}:new 2> /dev/null) != '' ]]; then
    echo "[NOTICE] Docker tag 'new' 'previous'"
    if docker tag ${project_name}:new ${project_name}:previous; then
      return
    fi
    echo "[NOTICE] There is no 'new' tag image to backup the app."
  fi

  # Second choice: the image tagged with the currently cached state (blue/green).
  echo "[NOTICE] Since there is no 'new' tag image for the app, depending on 'cache_all_states' previously run in the 'cache_global_vars' stage, we will back ${state} up."
  if docker tag ${project_name}:${state} ${project_name}:previous; then
    return
  fi
  echo "[NOTICE] No ${state} tagged image."

  # Last resort: fall back to 'latest'.
  echo "[NOTICE] Since there is no ${state} images, we will attempt to back up the latest image as previous"
  docker tag ${project_name}:latest ${project_name}:previous || echo "[NOTICE] No 'latest' tagged image."
}
backup_nginx_to_previous_images(){

  # NGINX image backup chain, mirroring backup_app_to_previous_images:
  # rotate 'previous' to 'previous2', then promote 'new' (or, failing that,
  # 'latest') to become the fresh 'previous' tag.
  echo "[NOTICE] Before creating the 'previous' tagged image of Nginx, if there is an existing 'previous' tagged image, we will proceed with backing it up as the 'previous2' tagged image."
  docker tag ${project_name}-nginx:previous ${project_name}-nginx:previous2 || echo "[NOTICE] No 'previous' tagged image."

  # First choice: the 'new' Nginx image, if present.
  if [[ $(docker images -q ${project_name}-nginx:new 2> /dev/null) != '' ]]; then
    echo "[NOTICE] Docker tag 'new' 'previous' (NGINX)"
    if docker tag ${project_name}-nginx:new ${project_name}-nginx:previous; then
      return
    fi
    echo "[NOTICE] No Nginx 'new' tagged image."
  fi

  # Fallback: back up 'latest' instead.
  echo "[NOTICE] Since there is no existing Nginx 'new' image, we will attempt to back up the latest image as 'previous'."
  docker tag ${project_name}-nginx:latest ${project_name}-nginx:previous || echo "[NOTICE] No Nginx 'latest' tagged image."
}
give_host_group_id_full_permissions(){
  # By default, all volume folders are granted 'permission for the user's group of the host (not the root user, but the current user)'.
  # Then, the permissions for the App using the folder in the container (such as www-data, redis, etc.)
  # are given in the Dockerfile or ENTRYPOINT.
  # This is because, in the development environment,
  # volume folders may need to be modified by IDEs or other tools on the host,
  # so permissions are given to the host, and permissions are also required for the libraries to access each folder inside Docker
  # (permissions inside Docker are executed in the ENTRYPOINT script)
  echo "[NOTICE] !! APP_ENV=local Only : To facilitate access from an IDE to Docker's internal permissions, we grant host permissions locally and set them to 777."
  # Quote both expansions: these run recursively as root, so an unquoted path
  # containing spaces would word-split and chgrp/chmod the wrong targets.
  sudo chgrp -R "${host_root_gid}" "${host_root_location}"
  sudo chmod -R 777 "${host_root_location}"
}
terminate_whole_system(){
  # Destructive recovery path: remove every image tag this deployer manages,
  # take down all compose stacks, and prune Docker.
  # Only runs when DOCKER_LAYER_CORRUPTION_RECOVERY=true.
  if [[ ${docker_layer_corruption_recovery} == true ]]; then

    # '|| true' on each removal: 'docker rmi'/'docker network rm' return
    # non-zero for a missing image/network, which would otherwise abort this
    # whole recovery routine under 'set -e'.
    local tag
    for tag in latest new previous previous2; do
      docker rmi -f ${project_name}-nginx:${tag} || true
    done
    for tag in latest new previous previous2 blue green; do
      docker rmi -f ${project_name}:${tag} || true
    done

    docker-compose -f docker-${orchestration_type}-${project_name}-local.yml down || echo "[NOTICE] docker-${orchestration_type}-${project_name}-local.yml down failure"
    docker-compose -f docker-${orchestration_type}-${project_name}-real.yml down || echo "[NOTICE] docker-${orchestration_type}-${project_name}-real.yml down failure"
    # Fixed notice text: the file taken down here is docker-*-consul.yml
    # (the old message named a non-existent docker-*-${project_name}-consul.yml).
    docker-compose -f docker-${orchestration_type}-consul.yml down || echo "[NOTICE] docker-${orchestration_type}-consul.yml down failure"
    docker-compose -f docker-compose-${project_name}-nginx.yml down || echo "[NOTICE] docker-compose-${project_name}-nginx.yml down failure"

    # Removing the network can fail while endpoints are still detaching,
    # so try twice; again '|| true' keeps 'set -e' from killing the script.
    docker network rm consul || true
    docker network rm consul || true

    docker system prune -f
  fi
}
load_all_containers(){
  # Bring up all containers in dependency order: Consul -> App -> Nginx.
  # In the past, restarting Nginx before App caused error messages like "upstream not found" in the Nginx configuration file. This seems to have caused a 502 error on the socket side.

  echo "[NOTICE] Creating consul network..."
  if [[ ${orchestration_type} != 'stack' ]]; then
    docker network create consul || echo "[NOTICE] Consul Network has already been created. You can ignore this message."
  else
    # Swarm ('stack') mode needs an overlay network instead of the default bridge.
    docker network create --driver overlay consul || echo "[NOTICE] Consul Network has already been created. You can ignore this message."
  fi

  # Therefore, it is safer to restart the containers in the order of Consul -> App -> Nginx.
  if [[ ${consul_restart} == 'true' ]]; then
    consul_down_and_up
  fi

  echo "[NOTICE] Run the app as a ${new_state} container. (As long as NGINX_RESTART is set to 'false', this won't stop the running container since this is a BLUE-GREEN deployment.)"
  app_down_and_up

  echo "[NOTICE] Check the integrity inside the '${project_name}-${new_state} container'."
  # Local builds are slower (cold caches), so allow a much longer window there.
  if [[ ${app_env} == 'local' ]]; then
    re=$(check_availability_inside_container ${new_state} 600 30 | tail -n 1) || exit 1
  else
    re=$(check_availability_inside_container ${new_state} 120 5 | tail -n 1) || exit 1
  fi
  if [[ ${re} != 'true' ]]; then
    echo "[ERROR] Failed in running the ${new_state} container. Run ' docker logs -f ${project_name}-${new_state} (compose), docker service ps ${project_name}-${new_state}}_${project_name}-${new_state} (stack) ' to check errors (Return : ${re})" && exit 1
  fi

  #else
  # echo "[NOTICE] Check the integrity from Consul to the '${project_name}-${new_state} stack'."
  # if [[ ${app_env} == 'local' ]]; then
  # re=$(check_availability_from_consul_to_container ${new_state} 30 | tail -n 1) || exit 1;
  # else
  # re=$(check_availability_from_consul_to_container ${new_state} 5 | tail -n 1) || exit 1;
  # fi
  #if [[ ${re} != 'true' ]]; then
  # echo "[ERROR] Failed in running the ${new_state} container. Run 'docker logs -f ${project_name}-${new_state}' to check errors (Return : ${re})" && exit 1
  #fi
  # fi

  if [[ ${nginx_restart} == 'true' ]]; then
    check_nginx_templates_integrity
    nginx_down_and_up
  fi

  # Use '{ ...; exit 1; }' instead of a '( ... )' subshell: 'exit' inside a
  # subshell only terminates the subshell, and the old form worked only
  # because 'set -e' happened to catch the subshell's non-zero status.
  check_necessary_supporting_containers_loaded || { echo "[ERROR] Failed in loading necessary supporting containers."; exit 1; }
  # Best-effort check: a failure here only logs; the Nginx Contingency Plan copes.
  check_supporting_containers_loaded || echo "[ERROR] Failed in loading supporting containers. We will conduct the Nginx Contingency Plan."
}
# Snapshot the freshly deployed images: tag the current 'latest' of both the
# App and Nginx as 'new', so the next deployment run can rotate them into
# 'previous' via the backup_*_to_previous_images functions.
backup_to_new_images(){

  local app_image=${project_name}
  local nginx_image=${project_name}-nginx

  echo "[NOTICE] docker tag latest new"
  docker tag ${app_image}:latest ${app_image}:new || echo "[NOTICE] the ${app_image}:latest image does NOT exist."

  echo "[NOTICE] docker tag latest new (NGINX)"
  docker tag ${nginx_image}:latest ${nginx_image}:new || echo "[NOTICE] ${nginx_image}:latest does NOT exist."
}
_main() {
  # Orchestrates one full blue-green deployment:
  # [A] gather config -> [A-1..3] prepare files/permissions -> [B] build images
  # -> [C] start containers -> [D] point Consul -> [E] external health check
  # -> [F] finalize (tag backups, stop the old container, clean up).

  # [A] Get mandatory variables
  check_necessary_commands
  cache_global_vars
  # The 'cache_all_states' in 'cache_global_vars' function decides which state should be deployed. If this is called later at a point in this script, states could differ.
  local initially_cached_old_state=${state}
  check_env_integrity

  echo "[NOTICE] Finally, !! Deploy the App as !! ${new_state} !!, we will now deploy '${project_name}' in a way of 'Blue-Green'"

  # [A-1] Set mandatory files
  ## App
  initiate_docker_compose_file
  apply_env_service_name_onto_app_yaml
  apply_docker_compose_environment_onto_app_yaml
  if [[ ${app_env} == 'real' ]]; then
    apply_docker_compose_volumes_onto_app_real_yaml
  fi
  if [[ ${skip_building_app_image} != 'true' ]]; then
    backup_app_to_previous_images
  fi

  ## Nginx
  if [[ ${nginx_restart} == 'true' ]]; then
    initiate_nginx_docker_compose_file
    apply_env_service_name_onto_nginx_yaml
    apply_ports_onto_nginx_yaml
    apply_docker_compose_volumes_onto_app_nginx_yaml
    save_nginx_ctmpl_template_from_origin
    save_nginx_contingency_template_from_origin
    save_nginx_logrotate_template_from_origin
    save_nginx_main_template_from_origin
    backup_nginx_to_previous_images
  fi

  # [A-2] Set 'Shared Volume Group'
  # Declaration is split from assignment: 'local var=$(cmd)' always returns 0
  # from 'local', which made the old '|| echo' warning unreachable (SC2155).
  local add_host_users_to_shared_volume_group_re
  add_host_users_to_shared_volume_group_re=$(add_host_users_to_host_group ${shared_volume_group_id} ${shared_volume_group_name} ${uids_belonging_to_shared_volume_group_id} | tail -n 1) || echo "[WARNING] Running 'add_host_users_to_shared_volume_group' failed."
  if [[ ${add_host_users_to_shared_volume_group_re} = 'false' ]]; then
    echo "[WARNING] Running 'add_host_users_to_host_group'(SHARED) failed."
  fi

  # [A-3] Etc.
  if [[ ${app_env} == 'local' ]]; then
    give_host_group_id_full_permissions
  fi
  if [[ ${docker_layer_corruption_recovery} == 'true' ]]; then
    terminate_whole_system
  fi

  # [B] Build Docker images for the App, Nginx, Consul
  if [[ ${skip_building_app_image} != 'true' ]]; then
    load_app_docker_image
  fi
  if [[ ${consul_restart} == 'true' ]]; then
    load_consul_docker_image
  fi
  if [[ ${nginx_restart} == 'true' ]]; then
    load_nginx_docker_image
  fi
  if [[ ${only_building_app_image} == 'true' ]]; then
    echo "[NOTICE] Successfully built the App image : ${new_state}" && exit 0
  fi

  # Re-check the deployment target after the (possibly slow) image builds:
  # if it changed under us, bail out rather than deploy to the wrong color.
  local cached_new_state=${new_state}
  cache_all_states
  if [[ ${cached_new_state} != "${new_state}" ]]; then
    # Plain statements, not '( ... && exit 1 )': 'exit' inside a subshell only
    # terminates the subshell, so the old form relied on 'set -e' by accident.
    echo "[ERROR] Just checked all states shortly after the Docker Images had been done built. The state the App was supposed to be deployed as has been changed. (Original : ${cached_new_state}, New : ${new_state}). For the safety, we exit..."
    exit 1
  fi

  # [C] docker-compose up the App, Nginx, Consul & * Internal Integrity Check for the App
  load_all_containers

  # [D] Set Consul
  ./activate.sh ${new_state} ${state} ${new_upstream} ${consul_key_value_store}

  # [E] External Integrity Check, if fails, 'emergency-nginx-down-and-up.sh' will be run.
  re=$(check_availability_out_of_container | tail -n 1)
  if [[ ${re} != 'true' ]]; then
    echo "[WARNING] ! ${new_state}'s availability issue found. Now we are going to run 'emergency-nginx-down-and-up.sh' immediately."
    bash emergency-nginx-down-and-up.sh
    re=$(check_availability_out_of_container | tail -n 1)
    if [[ ${re} != 'true' ]]; then
      echo "[ERROR] Failed to call app_url on .env outside the container. Consider running bash rollback.sh OR check your !firewall. (result value : ${re})" && exit 1
    fi
  fi

  # [F] Finalizing the process : from this point on, regarded as "success".
  if [[ ${skip_building_app_image} != 'true' ]]; then
    backup_to_new_images
  fi

  echo "[DEBUG] state : ${state}, new_state : ${new_state}, initially_cached_old_state : ${initially_cached_old_state}"
  echo "[NOTICE] For safety, finally check Consul pointing before stopping the previous container (${initially_cached_old_state})."
  local consul_pointing=$(docker exec ${project_name}-nginx curl ${consul_key_value_store}?raw 2>/dev/null || echo "failed")

  # Only stop the old color once Consul is confirmed to no longer point at it.
  if [[ ${consul_pointing} != "${initially_cached_old_state}" ]]; then
    if [[ ${orchestration_type} != 'stack' ]]; then
      docker-compose -f docker-${orchestration_type}-${project_name}-${app_env}.yml stop ${project_name}-${initially_cached_old_state}
      echo "[NOTICE] The previous (${initially_cached_old_state}) container (initially_cached_old_state) has been stopped because the deployment was successful. (If NGINX_RESTART=true or CONSUL_RESTART=true, existing containers have already been terminated in the load_all_containers function.)"
    else
      docker stack rm ${project_name}-${initially_cached_old_state}
      echo "[NOTICE] The previous (${initially_cached_old_state}) service (initially_cached_old_state) has been stopped because the deployment was successful. (If NGINX_RESTART=true or CONSUL_RESTART=true, existing containers have already been terminated in the load_all_containers function.)"
    fi
  else
    echo "[NOTICE] The previous (${initially_cached_old_state}) container (initially_cached_old_state) has NOT been stopped because the current Consul Pointing is ${consul_pointing}."
  fi

  echo "[NOTICE] Delete <none>:<none> images."
  docker rmi $(docker images -f "dangling=true" -q) || echo "[NOTICE] Any images in use will not be deleted."

  echo "[NOTICE] APP_URL : ${app_url}"
}
_main