Commit e221c7f (1 parent: beedb53)

Add integ for test_hyperparameter_tuning_job and test_transform_job

File tree: 3 files changed (+344, -2 lines)


integ/sagemaker_cleaner.py

Lines changed: 41 additions & 1 deletion
@@ -1,5 +1,11 @@
 import datetime
-from sagemaker_core.main.resources import Model, EndpointConfig, Endpoint
+from sagemaker_core.main.resources import (
+    HyperParameterTuningJob,
+    Model,
+    EndpointConfig,
+    Endpoint,
+    TransformJob,
+)
 
 
 class SageMakerCleaner:

@@ -85,6 +91,40 @@ def cleanup_models(self, creation_time_before, creation_time_after):
                 self._track_resource(failed=1)
             self._track_resource(deleted=1)
 
+    def cleanup_hyperparameter_tuningjob(self, creation_time_before, creation_time_after):
+        """Deletes hyperparameter tuning jobs created within a given time window
+
+        Args:
+            creation_time_before (datetime): timestamp for 'CreationTimeBefore' or 'CreatedBefore' boto3 parameter
+            creation_time_after (datetime): timestamp for 'CreationTimeAfter' or 'CreatedAfter' boto3 parameter
+        """
+        tuning_jobs = HyperParameterTuningJob.get_all(
+            creation_time_before=creation_time_before, creation_time_after=creation_time_after
+        )
+        for tuning_job in tuning_jobs:
+            try:
+                tuning_job.delete()
+            except Exception:
+                self._track_resource(failed=1)
+            self._track_resource(deleted=1)
+
+    def cleanup_transform_job(self, creation_time_before, creation_time_after):
+        """Stops transform jobs created within a given time window
+
+        Args:
+            creation_time_before (datetime): timestamp for 'CreationTimeBefore' or 'CreatedBefore' boto3 parameter
+            creation_time_after (datetime): timestamp for 'CreationTimeAfter' or 'CreatedAfter' boto3 parameter
+        """
+        transform_jobs = TransformJob.get_all(
+            creation_time_before=creation_time_before, creation_time_after=creation_time_after
+        )
+        for transform_job in transform_jobs:
+            try:
+                transform_job.stop()
+            except Exception:
+                self._track_resource(failed=1)
+            self._track_resource(deleted=1)
+
     def _track_resource(self, deleted=0, failed=0):
         """Updates the resource tracker with # of deleted, or failed resources

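For context, a minimal sketch of how the two new cleanup methods might be driven (not part of this commit). It assumes SageMakerCleaner can be constructed without arguments and reuses the same time-window convention as the tests; the actual wiring lives in handle_cleanup, which this diff doesn't touch.

import datetime

from sagemaker_cleaner import SageMakerCleaner

# Hypothetical driver: sweep tuning and transform jobs created within the
# last 5 days but older than one hour, so in-flight test resources survive.
cleaner = SageMakerCleaner()  # assumption: no-arg constructor
now = datetime.datetime.now()
window = dict(
    creation_time_before=now - datetime.timedelta(hours=1),
    creation_time_after=now - datetime.timedelta(days=5),
)
cleaner.cleanup_hyperparameter_tuningjob(**window)
cleaner.cleanup_transform_job(**window)
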
integ/test_codegen.py

Lines changed: 5 additions & 1 deletion
@@ -10,7 +10,11 @@
 from sklearn.model_selection import train_test_split
 
 from sagemaker_cleaner import handle_cleanup
-from sagemaker_core.main.shapes import ContainerDefinition, ProductionVariant, ProfilerConfig
+from sagemaker_core.main.shapes import (
+    ContainerDefinition,
+    ProductionVariant,
+    ProfilerConfig,
+)
 from sagemaker_core.main.resources import (
     TrainingJob,
     AlgorithmSpecification,

Lines changed: 298 additions & 0 deletions
@@ -0,0 +1,298 @@
+import datetime
+import logging
+import time
+import unittest
+import pandas as pd
+from io import StringIO
+
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+
+from sagemaker_cleaner import handle_cleanup
+from sagemaker_core.main.shapes import (
+    AutoParameter,
+    Autotune,
+    ContainerDefinition,
+    HyperParameterAlgorithmSpecification,
+    HyperParameterTrainingJobDefinition,
+    HyperParameterTuningJobConfig,
+    HyperParameterTuningJobObjective,
+    ParameterRanges,
+    ResourceLimits,
+    TransformDataSource,
+    TransformInput,
+    TransformOutput,
+    TransformResources,
+    TransformS3DataSource,
+)
+from sagemaker_core.main.resources import (
+    HyperParameterTuningJob,
+    TrainingJob,
+    TransformJob,
+    AlgorithmSpecification,
+    Channel,
+    DataSource,
+    S3DataSource,
+    OutputDataConfig,
+    ResourceConfig,
+    StoppingCondition,
+    Model,
+)
+from sagemaker_core.helper.session_helper import Session, get_execution_role
+
+logger = logging.getLogger()
+
+sagemaker_session = Session()
+region = sagemaker_session.boto_region_name
+role = get_execution_role()
+bucket = sagemaker_session.default_bucket()
+
+### Data preparation for test_hyperparameter_tuning_job and test_transform_job
+data = sagemaker_session.read_s3_file(
+    f"sagemaker-example-files-prod-{region}", "datasets/tabular/synthetic/churn.txt"
+)
+
+df = pd.read_csv(StringIO(data))
+
+df = df.drop("Phone", axis=1)
+df["Area Code"] = df["Area Code"].astype(object)
+df = df.drop(["Day Charge", "Eve Charge", "Night Charge", "Intl Charge"], axis=1)
+
+model_data = pd.get_dummies(df)
+model_data = pd.concat(
+    [
+        model_data["Churn?_True."],
+        model_data.drop(["Churn?_False.", "Churn?_True."], axis=1),
+    ],
+    axis=1,
+)
+model_data = model_data.astype(float)
+
+train_data2, validation_data = train_test_split(model_data, test_size=0.33, random_state=42)
+
+validation_data, test_data2 = train_test_split(validation_data, test_size=0.33, random_state=42)
+
+test_target_column = test_data2["Churn?_True."]
+test_data2.drop(["Churn?_True."], axis=1, inplace=True)
+
+train_data2.to_csv("train2.csv", header=False, index=False)
+validation_data.to_csv("validation.csv", header=False, index=False)
+test_data2.to_csv("test.csv", header=False, index=False)
+
+s3_train_input = sagemaker_session.upload_data("train2.csv", bucket)
+s3_validation_input = sagemaker_session.upload_data("validation.csv", bucket)
+s3_test_input = sagemaker_session.upload_data("test.csv", bucket)
+
+image2 = "246618743249.dkr.ecr.us-west-2.amazonaws.com/sagemaker-xgboost:1.7-1"
+instance_type = "ml.m4.xlarge"
+instance_count = 1
+volume_size_in_gb = 30
+max_runtime_in_seconds = 600
+
+
+class TestSageMakerCore(unittest.TestCase):
+
+    def test_hyperparameter_tuning_job_and_transform_job(self):
+        ### Create training job resource
+        job_name = "xgboost-churn-" + time.strftime(
+            "%Y-%m-%d-%H-%M-%S", time.gmtime()
+        )  # Name of training job
+        instance_type = "ml.m4.xlarge"  # SageMaker instance type to use for training
+        instance_count = 1  # Number of instances to use for training
+        volume_size_in_gb = 30  # Amount of storage to allocate to the training job
+        max_runtime_in_seconds = 600  # Maximum runtime. Job exits if it doesn't finish before this
+        s3_output_path = f"s3://{bucket}"  # Bucket and optional prefix where the training job stores output artifacts, like the model artifact
+
+        hyper_parameters = {
+            "max_depth": "5",
+            "eta": "0.2",
+            "gamma": "4",
+            "min_child_weight": "6",
+            "subsample": "0.8",
+            "verbosity": "0",
+            "objective": "binary:logistic",
+            "num_round": "100",
+        }
+
+        training_job = TrainingJob.create(
+            training_job_name=job_name,
+            hyper_parameters=hyper_parameters,
+            algorithm_specification=AlgorithmSpecification(
+                training_image=image2, training_input_mode="File"
+            ),
+            role_arn=role,
+            input_data_config=[
+                Channel(
+                    channel_name="train",
+                    content_type="csv",
+                    data_source=DataSource(
+                        s3_data_source=S3DataSource(
+                            s3_data_type="S3Prefix",
+                            s3_uri=s3_train_input,
+                            s3_data_distribution_type="FullyReplicated",
+                        )
+                    ),
+                ),
+                Channel(
+                    channel_name="validation",
+                    content_type="csv",
+                    data_source=DataSource(
+                        s3_data_source=S3DataSource(
+                            s3_data_type="S3Prefix",
+                            s3_uri=s3_validation_input,
+                            s3_data_distribution_type="FullyReplicated",
+                        )
+                    ),
+                ),
+            ],
+            output_data_config=OutputDataConfig(s3_output_path=s3_output_path),
+            resource_config=ResourceConfig(
+                instance_type=instance_type,
+                instance_count=instance_count,
+                volume_size_in_gb=volume_size_in_gb,
+            ),
+            stopping_condition=StoppingCondition(max_runtime_in_seconds=max_runtime_in_seconds),
+        )
+
+        training_job.wait()
+
+        ### Create and test HyperParameterTuningJob
+        tuning_job_name = "xgboost-tune-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
+        max_number_of_training_jobs = 50
+        max_parallel_training_jobs = 5
+        max_runtime_in_seconds = 3600
+        s3_output_path = f"s3://{bucket}/tuningjob"
+
+        hyper_parameter_training_job_definition = HyperParameterTrainingJobDefinition(
+            role_arn=role,
+            algorithm_specification=HyperParameterAlgorithmSpecification(
+                training_image=image2, training_input_mode="File"
+            ),
+            input_data_config=[
+                Channel(
+                    channel_name="train",
+                    content_type="csv",
+                    data_source=DataSource(
+                        s3_data_source=S3DataSource(
+                            s3_data_type="S3Prefix",
+                            s3_uri=s3_train_input,
+                            s3_data_distribution_type="FullyReplicated",
+                        )
+                    ),
+                ),
+                Channel(
+                    channel_name="validation",
+                    content_type="csv",
+                    data_source=DataSource(
+                        s3_data_source=S3DataSource(
+                            s3_data_type="S3Prefix",
+                            s3_uri=s3_validation_input,
+                            s3_data_distribution_type="FullyReplicated",
+                        )
+                    ),
+                ),
+            ],
+            output_data_config=OutputDataConfig(s3_output_path=s3_output_path),
+            stopping_condition=StoppingCondition(max_runtime_in_seconds=max_runtime_in_seconds),
+            resource_config=ResourceConfig(
+                instance_type=instance_type,
+                instance_count=instance_count,
+                volume_size_in_gb=volume_size_in_gb,
+            ),
+        )
+
+        tuning_job_config = HyperParameterTuningJobConfig(
+            strategy="Bayesian",
+            hyper_parameter_tuning_job_objective=HyperParameterTuningJobObjective(
+                type="Maximize", metric_name="validation:auc"
+            ),
+            resource_limits=ResourceLimits(
+                max_number_of_training_jobs=max_number_of_training_jobs,
+                max_parallel_training_jobs=max_parallel_training_jobs,
+                max_runtime_in_seconds=3600,
+            ),
+            training_job_early_stopping_type="Auto",
+            parameter_ranges=ParameterRanges(
+                auto_parameters=[
+                    AutoParameter(name="max_depth", value_hint="5"),
+                    AutoParameter(name="eta", value_hint="0.1"),
+                    AutoParameter(name="gamma", value_hint="8"),
+                    AutoParameter(name="min_child_weight", value_hint="2"),
+                    AutoParameter(name="subsample", value_hint="0.5"),
+                    AutoParameter(name="num_round", value_hint="50"),
+                ]
+            ),
+        )
+
+        tuning_job = HyperParameterTuningJob.create(
+            hyper_parameter_tuning_job_name=tuning_job_name,
+            autotune=Autotune(mode="Enabled"),
+            training_job_definition=hyper_parameter_training_job_definition,
+            hyper_parameter_tuning_job_config=tuning_job_config,
+        )
+
+        tuning_job.wait()
+
+        fetch_tuning_job = HyperParameterTuningJob.get(
+            hyper_parameter_tuning_job_name=tuning_job_name
+        )
+        assert (
+            fetch_tuning_job.training_job_definition.output_data_config.s3_output_path
+            == s3_output_path
+        )
+        assert fetch_tuning_job.hyper_parameter_tuning_job_config.strategy == "Bayesian"
+
+        creation_time_after = datetime.datetime.now() - datetime.timedelta(days=5)
+
+        resource_iterator = HyperParameterTuningJob.get_all(creation_time_after=creation_time_after)
+        tuning_jobs = [job.hyper_parameter_tuning_job_name for job in resource_iterator]
+
+        assert len(tuning_jobs) > 0
+        assert tuning_job_name in tuning_jobs
+
+        ### Create Model resource for transform job use
+        model_s3_uri = TrainingJob.get(
+            tuning_job.best_training_job.training_job_name
+        ).model_artifacts.s3_model_artifacts
+        model_name_for_transform_job = (
+            f'customer-churn-xgboost-{time.strftime("%H-%M-%S", time.gmtime())}'
+        )
+        customer_churn_model = Model.create(
+            model_name=model_name_for_transform_job,
+            primary_container=ContainerDefinition(image=image2, model_data_url=model_s3_uri),
+            execution_role_arn=role,
+        )
+
+        ### Create and test TransformJob
+        s3_output_path = f"s3://{bucket}/transform"
+        transform_job_name = "churn-prediction-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
+
+        transform_job = TransformJob.create(
+            transform_job_name=transform_job_name,
+            model_name=model_name_for_transform_job,
+            transform_input=TransformInput(
+                data_source=TransformDataSource(
+                    s3_data_source=TransformS3DataSource(
+                        s3_data_type="S3Prefix", s3_uri=s3_test_input
+                    )
+                ),
+                content_type="text/csv",
+            ),
+            transform_output=TransformOutput(s3_output_path=s3_output_path),
+            transform_resources=TransformResources(
+                instance_type=instance_type, instance_count=instance_count
+            ),
+        )
+
+        transform_job.wait()
+
+        fetch_transform_job = TransformJob.get(transform_job_name=transform_job_name)
+        assert fetch_transform_job.transform_output.s3_output_path == s3_output_path
+
+        creation_time_after = datetime.datetime.now() - datetime.timedelta(days=5)
+
+        resource_iterator = TransformJob.get_all(creation_time_after=creation_time_after)
+        transform_jobs = [job.transform_job_name for job in resource_iterator]
+
+        assert len(transform_jobs) > 0
+        assert transform_job_name in transform_jobs
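Once transform_job.wait() returns, the predictions are objects under s3://{bucket}/transform. A hedged follow-up sketch, not part of the commit: batch transform conventionally names each output "<input file>.out", so the "transform/test.csv.out" key below is an assumption derived from the test.csv upload above.

# Hypothetical verification: read the batch predictions back and check that
# one score came out per held-out row. Key name assumes "<input>.out" naming.
predictions = sagemaker_session.read_s3_file(bucket, "transform/test.csv.out")
scores = [float(line) for line in predictions.splitlines() if line.strip()]
assert len(scores) == len(test_target_column)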

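The assertions in the tuning half of the test only touch top-level fields. To inspect individual trials, the underlying boto3 SageMaker API can be queried directly; a minimal sketch, assuming the tuning_job_name and region from the test above:

import boto3

# List this tuning job's training jobs, best objective value first.
sm_client = boto3.client("sagemaker", region_name=region)
response = sm_client.list_training_jobs_for_hyper_parameter_tuning_job(
    HyperParameterTuningJobName=tuning_job_name,
    SortBy="FinalObjectiveMetricValue",
    SortOrder="Descending",
)
for summary in response["TrainingJobSummaries"]:
    metric = summary.get("FinalHyperParameterTuningJobObjectiveMetric", {})
    print(summary["TrainingJobName"], metric.get("MetricName"), metric.get("Value"))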