
Commit a5595af

Add support for TensorFlow 1.11.0 and bump library version up to 1.12.0 (#439)
* Add support for tensorflow 1.11.0 and bump library version up to 1.12.0
1 parent 5d88412 commit a5595af

File tree

12 files changed: +30, -35 lines


CHANGELOG.rst (+5)

@@ -2,6 +2,11 @@
 CHANGELOG
 =========
 
+1.12.0
+======
+
+* feature: add support for TensorFlow 1.11.0
+
 1.11.3
 ======
 

README.rst (+1, -1)

@@ -283,7 +283,7 @@ TensorFlow SageMaker Estimators
 
 By using TensorFlow SageMaker ``Estimators``, you can train and host TensorFlow models on Amazon SageMaker.
 
-Supported versions of TensorFlow: ``1.4.1``, ``1.5.0``, ``1.6.0``, ``1.7.0``, ``1.8.0``, ``1.9.0``, ``1.10.0``.
+Supported versions of TensorFlow: ``1.4.1``, ``1.5.0``, ``1.6.0``, ``1.7.0``, ``1.8.0``, ``1.9.0``, ``1.10.0``, ``1.11.0``.
 
 We recommend that you use the latest supported version, because that's where we focus most of our development efforts.
 

src/sagemaker/__init__.py (+1, -1)

@@ -35,4 +35,4 @@
 from sagemaker.session import s3_input  # noqa: F401
 from sagemaker.session import get_execution_role  # noqa: F401
 
-__version__ = '1.11.3'
+__version__ = '1.12.0'

src/sagemaker/fw_utils.py (+1, -1)

@@ -159,7 +159,7 @@ def framework_name_from_image(image_name):
            str: The image tag
    """
    # image name format: <account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<framework>-<py_ver>-<device>:<tag>
-    sagemaker_pattern = re.compile('^(\d+)(\.)dkr(\.)ecr(\.)(.+)(\.)amazonaws.com(/)(.*:.*)$')
+    sagemaker_pattern = re.compile(r'^(\d+)(\.)dkr(\.)ecr(\.)(.+)(\.)amazonaws.com(/)(.*:.*)$')
    sagemaker_match = sagemaker_pattern.match(image_name)
    if sagemaker_match is None:
        return None, None, None
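For context on what the touched line does (a standalone sketch, not part of the commit; the account ID, region, and tag below are made up to match the documented image-name format), the raw-string prefix keeps '\d' as a regex escape rather than an invalid Python string escape:

import re

# Same pattern the function uses, now written as a raw string.
sagemaker_pattern = re.compile(r'^(\d+)(\.)dkr(\.)ecr(\.)(.+)(\.)amazonaws.com(/)(.*:.*)$')

# Hypothetical image name following the documented format.
image = '123456789012.dkr.ecr.us-west-2.amazonaws.com/sagemaker-tensorflow-py2-cpu:1.11.0-cpu-py2'
match = sagemaker_pattern.match(image)
print(match.group(5))  # 'us-west-2' (region)
print(match.group(8))  # 'sagemaker-tensorflow-py2-cpu:1.11.0-cpu-py2' (repo:tag)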

src/sagemaker/session.py (+2, -2)

@@ -1050,8 +1050,8 @@ def _deployment_entity_exists(describe_fn):
        describe_fn()
        return True
    except ClientError as ce:
-        if not (ce.response['Error']['Code'] == 'ValidationException' and
-                'Could not find' in ce.response['Error']['Message']):
+        error_code = ce.response['Error']['Code']
+        if not (error_code == 'ValidationException' and 'Could not find' in ce.response['Error']['Message']):
            raise ce
    return False
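A minimal sketch of the error shape this branch inspects (illustrative only; the endpoint name and operation name below are hypothetical), showing why pulling the code into error_code keeps the condition on one line:

from botocore.exceptions import ClientError

# botocore packs service errors into a response dict with 'Error': {'Code', 'Message'}.
error_response = {'Error': {'Code': 'ValidationException',
                            'Message': 'Could not find endpoint "my-endpoint".'}}
ce = ClientError(error_response, 'DescribeEndpoint')

error_code = ce.response['Error']['Code']
should_reraise = not (error_code == 'ValidationException' and 'Could not find' in ce.response['Error']['Message'])
print(should_reraise)  # False: the error means the entity is absent, so the helper returns False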

src/sagemaker/tensorflow/README.rst (+1, -1)

@@ -6,7 +6,7 @@ TensorFlow SageMaker Estimators allow you to run your own TensorFlow
 training algorithms on SageMaker Learner, and to host your own TensorFlow
 models on SageMaker Hosting.
 
-Supported versions of TensorFlow: ``1.4.1``, ``1.5.0``, ``1.6.0``, ``1.7.0``, ``1.8.0``, ``1.9.0``, ``1.10.0``.
+Supported versions of TensorFlow: ``1.4.1``, ``1.5.0``, ``1.6.0``, ``1.7.0``, ``1.8.0``, ``1.9.0``, ``1.10.0``, ``1.11.0``.
 
 Training with TensorFlow
 ~~~~~~~~~~~~~~~~~~~~~~~~

src/sagemaker/tensorflow/defaults.py (+1, -1)

@@ -12,4 +12,4 @@
 # language governing permissions and limitations under the License.
 from __future__ import absolute_import
 
-TF_VERSION = '1.10'
+TF_VERSION = '1.11'
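A rough sketch of where this default is expected to surface (an assumption, not shown in this commit: TF_VERSION presumably backs the TensorFlow estimator's default framework_version; the entry point and role name below are hypothetical):

from sagemaker.tensorflow import TensorFlow

# Pinning framework_version explicitly; omitting it would presumably fall back to
# defaults.TF_VERSION, which this commit moves from '1.10' to '1.11'.
estimator = TensorFlow(entry_point='train.py',            # hypothetical training script
                       role='SageMakerRole',              # hypothetical IAM role name
                       train_instance_count=1,
                       train_instance_type='ml.c4.xlarge',
                       framework_version='1.11.0')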

tests/conftest.py (+1, -1)

@@ -81,7 +81,7 @@ def sagemaker_local_session(boto_config):
 
 @pytest.fixture(scope='module', params=['1.4', '1.4.1', '1.5', '1.5.0', '1.6', '1.6.0',
                                         '1.7', '1.7.0', '1.8', '1.8.0', '1.9', '1.9.0',
-                                        '1.10', '1.10.0'])
+                                        '1.10', '1.10.0', '1.11', '1.11.0'])
 def tf_version(request):
     return request.param
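As a reminder of how the parametrized fixture fans out (a standalone sketch with illustrative names, not code from the repository), every test that requests tf_version now also runs against the two new 1.11 values:

import pytest

@pytest.fixture(params=['1.10', '1.10.0', '1.11', '1.11.0'])
def tf_version(request):
    return request.param

# pytest generates one test invocation per value in params.
def test_version_string_is_well_formed(tf_version):
    assert all(part.isdigit() for part in tf_version.split('.'))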

tests/data/cifar_10/source/keras_cnn_cifar_10.py (+11, -21)

@@ -10,17 +10,12 @@
 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
+from __future__ import absolute_import, division, print_function
 
 import tensorflow as tf
-from tensorflow.python.keras.layers import InputLayer, Conv2D, Activation, MaxPooling2D, Dropout, Flatten, Dense
+from tensorflow.python.keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
 from tensorflow.python.keras.models import Sequential
-from tensorflow.python.keras.optimizers import RMSprop
-from tensorflow.python.saved_model.signature_constants import PREDICT_INPUTS
+from tensorflow.python.training.rmsprop import RMSPropOptimizer
 
 HEIGHT = 32
 WIDTH = 32
@@ -29,7 +24,7 @@
 NUM_DATA_BATCHES = 5
 NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES
 BATCH_SIZE = 128
-INPUT_TENSOR_NAME = PREDICT_INPUTS
+INPUT_TENSOR_NAME = 'inputs_input'  # needs to match the name of the first layer + "_input"
 
 
 def keras_model_fn(hyperparameters):
@@ -43,10 +38,7 @@ def keras_model_fn(hyperparameters):
     """
     model = Sequential()
 
-    # TensorFlow Serving default prediction input tensor name is PREDICT_INPUTS.
-    # We must conform to this naming scheme.
-    model.add(InputLayer(input_shape=(HEIGHT, WIDTH, DEPTH), name=PREDICT_INPUTS))
-    model.add(Conv2D(32, (3, 3), padding='same'))
+    model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)))
     model.add(Activation('relu'))
     model.add(Conv2D(32, (3, 3)))
     model.add(Activation('relu'))
@@ -67,19 +59,17 @@ def keras_model_fn(hyperparameters):
     model.add(Dense(NUM_CLASSES))
     model.add(Activation('softmax'))
 
-    _model = tf.keras.Model(inputs=model.input, outputs=model.output)
-
-    opt = RMSprop(lr=hyperparameters['learning_rate'], decay=hyperparameters['decay'])
+    opt = RMSPropOptimizer(learning_rate=hyperparameters['learning_rate'], decay=hyperparameters['decay'])
 
-    _model.compile(loss='categorical_crossentropy',
-                   optimizer=opt,
-                   metrics=['accuracy'])
+    model.compile(loss='categorical_crossentropy',
+                  optimizer=opt,
+                  metrics=['accuracy'])
 
-    return _model
+    return model
 
 
 def serving_input_fn(hyperpameters):
-    inputs = {PREDICT_INPUTS: tf.placeholder(tf.float32, [None, 32, 32, 3])}
+    inputs = {INPUT_TENSOR_NAME: tf.placeholder(tf.float32, [None, 32, 32, 3])}
     return tf.estimator.export.ServingInputReceiver(inputs, inputs)
 
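The renamed INPUT_TENSOR_NAME relies on the Keras naming rule called out in the new comment: when the first layer of a Sequential model is named 'inputs' and carries input_shape, Keras creates the model's input placeholder as 'inputs_input'. A standalone sketch (not part of the commit) to check that name:

import tensorflow as tf
from tensorflow.python.keras.layers import Conv2D
from tensorflow.python.keras.models import Sequential

model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(32, 32, 3)))

# The placeholder name should come out as the first layer's name + '_input',
# e.g. 'inputs_input:0', which is the dict key serving_input_fn must use.
print(model.input.name)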

tests/integ/test_tf_keras.py (+1, -1)

@@ -33,7 +33,7 @@ def test_keras(sagemaker_session, tf_full_version):
                            source_dir=script_path,
                            role='SageMakerRole', sagemaker_session=sagemaker_session,
                            hyperparameters={'learning_rate': 1e-4, 'decay': 1e-6},
-                           training_steps=500, evaluation_steps=5,
+                           training_steps=50, evaluation_steps=5,
                            train_instance_count=1, train_instance_type='ml.c4.xlarge',
                            train_max_run=45 * 60)

tests/integ/test_tuner.py (+2, -2)

@@ -286,7 +286,7 @@ def test_tuning_chainer(sagemaker_session):
 
    objective_metric_name = 'Validation-accuracy'
    metric_definitions = [
-        {'Name': 'Validation-accuracy', 'Regex': '\[J1\s+\d\.\d+\s+\d\.\d+\s+\d\.\d+\s+(\d\.\d+)'}]
+        {'Name': 'Validation-accuracy', 'Regex': r'\[J1\s+\d\.\d+\s+\d\.\d+\s+\d\.\d+\s+(\d\.\d+)'}]
 
    tuner = HyperparameterTuner(estimator, objective_metric_name, hyperparameter_ranges, metric_definitions,
                                max_jobs=2, max_parallel_jobs=2)
@@ -327,7 +327,7 @@ def test_attach_tuning_pytorch(sagemaker_session):
 
    with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):
        objective_metric_name = 'evaluation-accuracy'
-        metric_definitions = [{'Name': 'evaluation-accuracy', 'Regex': 'Overall test accuracy: (\d+)'}]
+        metric_definitions = [{'Name': 'evaluation-accuracy', 'Regex': r'Overall test accuracy: (\d+)'}]
        hyperparameter_ranges = {'batch-size': IntegerParameter(50, 100)}
 
        tuner = HyperparameterTuner(estimator, objective_metric_name, hyperparameter_ranges, metric_definitions,
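For illustration of what these metric regexes extract (the log line below is fabricated to match the regex shape; it is not real training output), the raw-string prefix again keeps '\d' a regex escape:

import re

pattern = re.compile(r'Overall test accuracy: (\d+)')
sample_log_line = 'Overall test accuracy: 97'  # made-up log line for illustration
print(pattern.search(sample_log_line).group(1))  # '97'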

tests/integ/timeout.py (+3, -3)

@@ -96,8 +96,8 @@ def _show_endpoint_logs(endpoint_name, sagemaker_session):
                       aws_region=sagemaker_session.boto_session.region_name)
        logs.list_logs()
    except Exception:
-        LOGGER.exception('Failure occurred while listing cloudwatch log group %s. ' +
-                         'Swallowing exception but printing stacktrace for debugging.', log_group)
+        LOGGER.exception('Failure occurred while listing cloudwatch log group %s. Swallowing exception but printing '
+                         'stacktrace for debugging.', log_group)
 
 
 def _cleanup_endpoint_logs(endpoint_name, sagemaker_session):
@@ -109,5 +109,5 @@ def _cleanup_endpoint_logs(endpoint_name, sagemaker_session):
        cwl_client.delete_log_group(logGroupName=log_group)
        LOGGER.info('deleted cloudwatch log group: {}'.format(log_group))
    except Exception:
-        LOGGER.exception('Failure occurred while cleaning up cloudwatch log group %s. ' +
+        LOGGER.exception('Failure occurred while cleaning up cloudwatch log group %s. '
                         'Swallowing exception but printing stacktrace for debugging.', log_group)
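A small standalone sketch of the style the fix settles on (the logger and log group names below are hypothetical): adjacent string literals are concatenated by the Python parser, so no '+' is needed, and the %s argument is still formatted lazily by the logging call:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('example')        # hypothetical logger name

log_group = '/aws/sagemaker/Endpoints/demo'  # hypothetical log group name
logger.info('Failure occurred while listing cloudwatch log group %s. '
            'Swallowing exception but printing stacktrace for debugging.', log_group)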
