# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import re
import pytest
import subprocess
import logging
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from pathlib import Path
from sagemaker.local import LocalSession
from sagemaker.processing import (
ProcessingInput,
ProcessingOutput
)
from sagemaker.sklearn import SKLearn
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.deserializers import CSVDeserializer
from sagemaker.pytorch import PyTorchModel
from sagemaker.serializers import CSVSerializer
# Replace this role ARN with an appropriate role for your environment
ROLE = "arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001"
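# Note: local mode generally does not assume this role for AWS calls; it mainly needs to be a well-formed ARN.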
def ensure_docker_compose_installed():
"""
    Downloads the Docker Compose CLI plugin if it is not present, and verifies the
    installation by checking that the output of 'docker compose version' matches
    the pattern 'Docker Compose version vX.Y.Z'.
"""
cli_plugins_path = Path.home() / ".docker" / "cli-plugins"
cli_plugins_path.mkdir(parents=True, exist_ok=True)
compose_binary_path = cli_plugins_path / "docker-compose"
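    # Download a pinned Compose release (v2.3.3, Linux x86_64) into Docker's CLI plugin directory if it is missing.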
if not compose_binary_path.exists():
subprocess.run(
[
"curl",
"-SL",
"https://github.com/docker/compose/releases/download/v2.3.3/docker-compose-linux-x86_64",
"-o",
str(compose_binary_path),
],
check=True,
)
subprocess.run(["chmod", "+x", str(compose_binary_path)], check=True)
# Verify Docker Compose version
try:
output = subprocess.check_output(["docker", "compose", "version"], stderr=subprocess.STDOUT)
output_decoded = output.decode("utf-8").strip()
logging.info(f"'docker compose version' output: {output_decoded}")
        # Expected format: "Docker Compose version vX.Y.Z"
        pattern = r"Docker Compose version v?\d+\.\d+\.\d+"
match = re.search(pattern, output_decoded)
assert (
match is not None
), f"Could not find a Docker Compose version string matching '{pattern}' in: {output_decoded}"
except subprocess.CalledProcessError as e:
raise AssertionError(f"Failed to verify Docker Compose: {e}")
"""
Local Mode: ProcessingJob
"""
@pytest.mark.local
def test_scikit_learn_local_processing():
"""
Test local mode processing with a scikit-learn processor.
This uses the same logic as scikit_learn_local_processing.py but in
    a pytest test function. This test runs the processing job locally via
    Docker and asserts that it completes without raising an error.
"""
ensure_docker_compose_installed()
# 1. Create local session for testing
sagemaker_session = LocalSession()
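    # "local_code": True runs the code from the local filesystem instead of uploading it to S3 first.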
sagemaker_session.config = {"local": {"local_code": True}}
# 2. Define a scikit-learn processor in local mode
processor = SKLearnProcessor(
framework_version="1.2-1",
instance_count=1,
instance_type="local",
role=ROLE,
sagemaker_session=sagemaker_session
)
logging.warning("Starting local processing job.")
logging.warning("Note: the first run may take time to pull the required Docker image.")
# 3. Run the processing job locally
# - Update 'source' and 'destination' paths based on your local folder setup
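    # The processing script is expected to read its input from /opt/ml/processing/input and write the
    # train/validation/test splits to /opt/ml/processing/output/{train,validation,test}.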
processor.run(
code="sample_processing_script.py",
inputs=[
ProcessingInput(
source="s3://sagemaker-example-files-prod-us-east-1/datasets/tabular/uci_bank_marketing/bank-additional-full.csv",
destination="/opt/ml/processing/input"
)
],
outputs=[
ProcessingOutput(
output_name="train_data",
source="/opt/ml/processing/output/train",
destination="./output_data/train",
),
ProcessingOutput(
output_name="validation_data",
source="/opt/ml/processing/output/validation",
destination="./output_data/validation"
),
ProcessingOutput(
output_name="test_data",
source="/opt/ml/processing/output/test",
destination="./output_data/test"
),
],
)
    # processor.run() raises on failure, so reaching this point means the job completed successfully.
    assert True
"""
Local Mode: Inference
"""
@pytest.mark.local
def test_pytorch_local_model_inference():
"""
    Test local mode inference with an NLP model using PyTorch.
This test deploys the model locally via Docker, performs an inference
on a sample dataset, and asserts that the output is received.
"""
ensure_docker_compose_installed()
# 1. Create a local session for inference
sagemaker_session = LocalSession()
sagemaker_session.config = {"local": {"local_code": True}}
    # Pre-trained model artifact used for inference
model_dir = 's3://aws-ml-blog/artifacts/pytorch-nlp-script-mode-local-model-inference/model.tar.gz'
    # Sample inputs for inference
test_data = [
"Never allow the same bug to bite you twice.",
"The best part of Amazon SageMaker is that it makes machine learning easy.",
"Amazon SageMaker Inference Recommender helps you choose the best available compute instance and configuration to deploy machine learning models for optimal inference performance and cost."
]
logging.warning(f'test_data: {test_data}')
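    # The entry point script is expected to implement the SageMaker PyTorch serving functions
    # (model_fn, input_fn, predict_fn, output_fn).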
model = PyTorchModel(
model_data=model_dir,
framework_version='1.8',
# source_dir='inference',
py_version='py3',
entry_point='sample_inference_script.py',
role=ROLE,
sagemaker_session=sagemaker_session
)
logging.warning('Deploying endpoint in local mode')
logging.warning(
'Note: if launching for the first time in local mode, container image download might take a few minutes to complete.')
predictor = model.deploy(
initial_instance_count=1,
instance_type='local',
container_startup_health_check_timeout=600
)
# create a new CSV serializer and deserializer
predictor.serializer = CSVSerializer()
predictor.deserializer = CSVDeserializer()
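    # The serializer sends the payload as comma-separated text; the deserializer parses the CSV response into lists.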
predictions = predictor.predict(
",".join(test_data)
)
logging.warning(f'predictions: {predictions}')
# delete endpoint, clean up and terminate
    predictor.delete_endpoint()
# assert model response
    assert isinstance(predictions, list), f"Expected a list response, got {type(predictions)}"
    assert len(predictions) >= 1, "Empty prediction list returned"
def download_training_and_eval_data():
logging.warning('Downloading training dataset')
# Load California Housing dataset, then join labels and features
california = datasets.fetch_california_housing()
dataset = np.insert(california.data, 0, california.target, axis=1)
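    # The target is inserted as column 0, so every CSV written below follows the label-first convention.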
    # Create directories and write the CSV files
os.makedirs("./data/train", exist_ok=True)
os.makedirs("./data/validation", exist_ok=True)
os.makedirs("./data/test", exist_ok=True)
train, other = train_test_split(dataset, test_size=0.3)
validation, test = train_test_split(other, test_size=0.5)
np.savetxt("./data/train/california_train.csv", train, delimiter=",")
np.savetxt("./data/validation/california_validation.csv", validation, delimiter=",")
np.savetxt("./data/test/california_test.csv", test, delimiter=",")
logging.warning('Downloading completed')
def do_inference_on_local_endpoint(predictor):
    logging.warning('Starting inference on the local endpoint.')
test_data = pd.read_csv("data/test/california_test.csv", header=None)
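    # Column 0 holds the target (see download_training_and_eval_data); the remaining columns are features.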
test_X = test_data.iloc[:, 1:]
test_y = test_data.iloc[:, 0]
predictions = predictor.predict(test_X.values)
logging.warning("Predictions: {}".format(predictions))
logging.warning("Actual: {}".format(test_y.values))
logging.warning(f"RMSE: {mean_squared_error(predictions, test_y.values)}")
return predictions, test_y.values, float(mean_squared_error(predictions, test_y.values))
"""
Local Mode: TrainingJob and Inference
"""
@pytest.mark.local
def test_sklearn_local_model_train_inference():
"""
    Test local mode training and inference with the SageMaker SKLearn estimator.
    This test trains and deploys the model locally via Docker, performs inference
    on a sample dataset, and asserts that the output is received.
"""
download_training_and_eval_data()
logging.warning('Starting model training.')
logging.warning('Note: if launching for the first time in local mode, container image download might take a few minutes to complete.')
    # 1. Create a local session for training and inference
sagemaker_session = LocalSession()
sagemaker_session.config = {"local": {"local_code": True}}
sklearn = SKLearn(
entry_point="sample_training_script.py",
# source_dir='training',
framework_version="1.2-1",
role=ROLE,
sagemaker_session=sagemaker_session,
instance_type="local",
hyperparameters={"max_leaf_nodes": 30},
)
train_input = "file://./data/train/california_train.csv"
validation_input = "file://./data/validation/california_validation.csv"
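    # "file://" URIs make local mode mount these directories directly; the channel names map to
    # /opt/ml/input/data/train and /opt/ml/input/data/validation inside the training container.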
sklearn.fit({"train": train_input, "validation": validation_input})
logging.warning('Completed model training')
logging.warning('Deploying endpoint in local mode')
predictor = sklearn.deploy(
initial_instance_count=1,
instance_type="local",
container_startup_health_check_timeout=600
)
# get predictions from local endpoint
test_preds, test_y, test_mse = do_inference_on_local_endpoint(predictor)
logging.warning('About to delete the endpoint')
predictor.delete_endpoint()
    assert isinstance(test_preds, np.ndarray), f"Predictions are not an np.ndarray: {test_preds}"
    assert isinstance(test_y, np.ndarray), f"Ground truth values are not an np.ndarray: {test_y}"
    assert isinstance(test_mse, float), f"MSE is not a float: {test_mse}"