-
Notifications
You must be signed in to change notification settings - Fork 33
/
Copy pathconfig_docker.py
284 lines (246 loc) · 9.73 KB
/
config_docker.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import argparse
import glob
import json
import os
import re
import shutil
import sys
import urllib
# `import urllib` alone does not import the request submodule; urlopen below
# requires it explicitly.
import urllib.request
from pathlib import Path

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError, ProfileNotFound
# Command line interface: exactly one of --profile / --use-envars / --clean
# must be supplied.
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--profile", help="Your AWS CLI profile name")
group.add_argument(
    "--use-envars", action="store_true", help="Use AWS CLI environment variables"
)
group.add_argument(
    "--clean",
    # store_true replaces the previous type=bool/nargs="?" combination, which
    # treated ANY argument string (even "False") as truthy.
    action="store_true",
    help="Clear all docker configuration files from volumes directories",
)
# Banner printed by verify_cwd() when the absolute path to the Greengrass IPC
# socket would exceed the unix domain socket path limit (~104 characters).
FILE_PATH_WARNING = """
*************************************************************************
*** WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! ***
*** ***
*** The current location of the repository has too long of a file ***
*** path to successfully launch the docker container. The total ***
*** length must be less than 104 characters. Reduce the file path by ***
*** the value below, or the Greengrass service will not start. ***
*** ***
*************************************************************************
"""
# Written into each cleaned volume directory so git keeps the (otherwise
# empty) directory but ignores all of its runtime contents.
GITIGNORE_CONTENT = """*
!.gitignore
"""
def replace(data, match: str, repl):
    """Recursively replace substring *match* with *repl* in every string
    leaf of a nested dict/list structure.

    :param data: nested structure of dicts, lists, and scalar leaves
    :param match: substring to search for
    :param repl: replacement text
    :return: new structure with the substitution applied to string leaves;
        non-string leaves (numbers, booleans, None) are returned unchanged
        instead of raising AttributeError on ``.replace()``
    """
    if isinstance(data, dict):
        return {k: replace(v, match, repl) for k, v in data.items()}
    if isinstance(data, list):
        return [replace(i, match, repl) for i in data]
    if isinstance(data, str):
        return data.replace(match, repl)
    # Pass non-string scalars (int, float, bool, None) through untouched.
    return data
def verify_cwd():
    """Verify the script is executed from the docker/ directory, and warn
    loudly when the absolute path to:
        docker/volumes/gg_root/ipc.socket
    would exceed the unix domain socket path limit (~104 characters):
    https://unix.stackexchange.com/questions/367008/why-is-socket-path-length-limited-to-a-hundred-chars

    Exits with status 1 when ./volumes is missing; otherwise only prints a
    warning for over-long paths (the run continues).
    """
    # ./volumes only exists at the repository's docker/ level, so its absence
    # means we are running from the wrong directory.
    docker_assets_path = Path("./volumes")
    if not docker_assets_path.exists():
        print(
            "Could not find directory './volumes/', run this script from the "
            "'docker/' directory. 'python3 config_docker.py'"
        )
        sys.exit(1)
    # Full runtime path is CWD plus "volumes/gg_root/ipc.socket"; compute its
    # absolute length once instead of three times.
    socket_len = len(str(Path(".", "volumes/gg_root/ipc.socket").absolute()))
    if socket_len > 103:
        print(FILE_PATH_WARNING)
        print(
            f"********** Total current length is {socket_len}, "
            f"{socket_len - 103} characters too long\n"
        )
def clean_config(dirs_to_clean: list):
    """Remove all docker volume files, restoring the unconfigured state:
    each directory is recreated empty except for a `.gitignore` file that
    keeps runtime content out of version control.

    :param dirs_to_clean: directories to clean out
    :type dirs_to_clean: list
    """
    print("Cleaning Docker volumes...")
    for directory in dirs_to_clean:  # renamed from `dir` (shadowed builtin)
        path = Path(directory)
        if path.exists():
            print(f"Deleting files in {path}")
            shutil.rmtree(path)
        # Recreate the directory even when it did not exist, so the expected
        # volume layout is always present afterwards.
        path.mkdir(parents=True, exist_ok=True)
        (path / ".gitignore").write_text(GITIGNORE_CONTENT)
        # Fixed message quoting (was: "Directory '{dir} cleaned").
        print(f"Directory '{directory}' cleaned")
def check_for_config(dirs_to_check: list, config_files: list):
    """Verify target directories are empty of non-dot files and that all
    template/config files exist.

    Exits with status 1 on the first populated directory or missing file.

    :param dirs_to_check: volume directories that must be empty
    :param config_files: template files that must exist
    """
    print("Verifying docker volume directories empty before creating configurations")
    for directory in dirs_to_check:
        path = Path(directory)
        # [!.]* skips dot-files such as the placeholder .gitignore
        files = glob.glob(f"{path}/[!.]*")
        if files:
            print(
                f"Files found in '{path}', not overwriting. Run with --clean to create new configurations. Files found: {files}"
            )
            sys.exit(1)
    print("All configuration directories empty, continuing")
    for file in config_files:
        path = Path(file)
        # Bug fix: previously tested `if not file:` (truthiness of the NAME
        # string), so a missing file was never detected.
        if not path.is_file():
            print(
                f"File {file} not found. This file must exist for creating configuration files."
            )
            sys.exit(1)
    print("All configuration file templates found, continuing.")
    return
def read_manifest():
    """Read the CDK manifest file to get the deployed stack's details.

    As of cdk 1.13.1, the stack can be found in ../cdk/cdk.out/manifest.json
    as an artifact object with a type of aws:cloudformation:stack.

    :return: dict with keys ``stackname``, ``account``, and ``region``

    Exits with status 1 when the manifest is missing, malformed, or contains
    no CloudFormation stack artifact (previously the no-artifact case fell
    through and returned None, crashing the caller later).
    """
    manifest_file = Path("../cdk/cdk.out/manifest.json")
    if not manifest_file.is_file():
        print(
            "manifest.json not found in cdk.out directory. Has the stack been deployed?"
        )
        sys.exit(1)
    try:
        with open(manifest_file) as f:
            manifest = json.load(f)
    except ValueError as e:
        print(f"Invalid format of {manifest_file}, error: {e}")
        sys.exit(1)
    # Return the stack name, account, and region from the first stack artifact
    for name, artifact in manifest["artifacts"].items():
        if artifact["type"] == "aws:cloudformation:stack":
            # environment is formatted "aws://<account>/<region>"
            env_parts = artifact["environment"].split("/")
            return {
                "stackname": name,
                "account": env_parts[2],
                "region": env_parts[-1],
            }
    print(f"No CloudFormation stack artifact found in {manifest_file}")
    sys.exit(1)
def read_parameter(
    parameter: str, session: "boto3.Session", with_decryption: bool = False
):
    """Read a value (certificate or private key) from Systems Manager
    Parameter Store.

    :param parameter: parameter name to read
    :param session: boto3 session used to create the SSM client
    :param with_decryption: decrypt a SecureString parameter
    :return: the parameter's string value

    Exits with status 1 on any SSM error. Previously the error branches only
    printed and fell through to ``return response[...]`` with ``response``
    unbound, raising NameError.
    """
    ssm = session.client("ssm")
    try:
        response = ssm.get_parameter(Name=parameter, WithDecryption=with_decryption)
    except ClientError as e:
        print(f"Error calling ssm.get_parameter() for parameter {parameter}, {e}")
        sys.exit(1)
    except Exception as e:
        print(f"Uncaught error, {e}")
        sys.exit(1)
    return response["Parameter"]["Value"]
def replace_variables(file: str, map: dict):
    """Return the contents of *file* with every ``${TOKEN}`` replaced by
    ``map["TOKEN"]``.

    :param file: path of the template file to read
    :param map: token -> replacement-text mapping
    :return: the substituted template text
    """
    template = Path(file).read_text()
    for token, value in map.items():
        # Literal str.replace instead of re.sub: certificate/key material may
        # contain backslashes or "\1" sequences that re.sub would interpret
        # as escape/group references in the replacement string.
        template = template.replace(f"${{{token}}}", value)
    return template
if __name__ == "__main__":
    # Confirm profile given as parameters
    args = parser.parse_args()
    # Volume directories that must be empty before (re)configuration
    docker_config_directories = [
        "./volumes/certs",
        "./volumes/config",
        "./volumes/gg_root",
    ]
    # Template files rendered into the final config.yaml / docker-compose.yml.
    # Bug fix: this list previously said "docker-compose.yaml.template" while
    # the render step below read "docker-compose.yml.template" — the existence
    # check and the actual read now agree on one path.
    template_files = [
        "./templates/config.yaml.template",
        "./templates/docker-compose.yml.template",
    ]
    config_values = {}
    verify_cwd()
    # if --clean, clear all directories and exit
    if args.clean:
        clean_config(docker_config_directories)
        sys.exit(0)
    # check for contents in certs/, config/ and gg_root/, alert and exit
    check_for_config(
        dirs_to_check=docker_config_directories, config_files=template_files
    )
    # Stack name, account, and region come from the local CDK deployment output
    stackname_manifest = read_manifest()
    stackname = stackname_manifest["stackname"]
    region = stackname_manifest["region"]
    # Load the deployed CloudFormation stack to read its outputs
    try:
        if args.profile:
            # Credentials from AWS CLI profile
            session = boto3.Session(profile_name=args.profile, region_name=region)
        else:
            # Credentials from system environment (--use-envars)
            session = boto3.Session(region_name=region)
        cloudformation = session.resource("cloudformation")
        stack = cloudformation.Stack(stackname)
        stack.load()
    except ProfileNotFound:
        print(f"The AWS config profile ({args.profile}) could not be found.")
        sys.exit(1)
    except Exception as e:
        print(e)
        sys.exit(1)
    # Map stack outputs to template tokens; the certificate and private key
    # values are SSM parameter names and are fetched separately.
    for output in stack.outputs:
        if output["OutputKey"] == "CredentialProviderEndpointAddress":
            config_values["CREDENTIAL_PROVIDER_ENDPOINT"] = output["OutputValue"]
        elif output["OutputKey"] == "DataAtsEndpointAddress":
            config_values["DATA_ATS_ENDPOINT"] = output["OutputValue"]
        elif output["OutputKey"] == "IotRoleAliasName":
            config_values["IOT_ROLE_ALIAS"] = output["OutputValue"]
        elif output["OutputKey"] == "ThingArn":
            # Thing name is the final segment of the thing ARN
            config_values["THING_NAME"] = output["OutputValue"].split("/")[-1]
        elif output["OutputKey"] == "CertificatePemParameter":
            certificate_pem = read_parameter(
                parameter=output["OutputValue"], session=session
            )
        elif output["OutputKey"] == "PrivateKeySecretParameter":
            private_key_pem = read_parameter(
                parameter=output["OutputValue"], session=session, with_decryption=True
            )
    config_values["AWS_REGION"] = region
    # Download the Amazon root CA the device uses for TLS validation
    with urllib.request.urlopen(
        "https://www.amazontrust.com/repository/AmazonRootCA1.pem"
    ) as response:
        root_ca_pem = response.read().decode("utf-8")
    # Render template files (paths agree with template_files above)
    config_template = replace_variables(
        file="./templates/config.yaml.template", map=config_values
    )
    docker_compose_template = replace_variables(
        file="./templates/docker-compose.yml.template", map=config_values
    )
    # Write certificates, keys, and rendered configuration files into volumes/
    with open(Path("./volumes/certs/device.pem.crt"), "w") as f:
        f.write(certificate_pem)
    with open(Path("./volumes/certs/private.pem.key"), "w") as f:
        f.write(private_key_pem)
    with open(Path("./volumes/certs/AmazonRootCA1.pem"), "w") as f:
        f.write(root_ca_pem)
    with open(Path("./volumes/config/config.yaml"), "w") as f:
        f.write(config_template)
    with open(Path("./docker-compose.yml"), "w") as f:
        f.write(docker_compose_template)