infra: address warnings about pytest custom marks, error message checking, and yaml loading #1640

Merged: 1 commit, Jun 26, 2020
1 change: 0 additions & 1 deletion tests/integ/test_chainer_train.py
@@ -61,7 +61,6 @@ def test_training_with_additional_hyperparameters(sagemaker_local_session, chainer


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 def test_attach_deploy(sagemaker_session, chainer_full_version, cpu_instance_type):
     with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
         script_path = os.path.join(DATA_DIR, "chainer_mnist", "mnist.py")
3 changes: 0 additions & 3 deletions tests/integ/test_inference_pipeline.py
@@ -51,8 +51,6 @@
 )


-@pytest.mark.continuous_testing
-@pytest.mark.regional_testing
 def test_inference_pipeline_batch_transform(sagemaker_session, cpu_instance_type):
     sparkml_model_data = sagemaker_session.upload_data(
         path=os.path.join(SPARKML_DATA_PATH, "mleap_model.tar.gz"),
@@ -94,7 +92,6 @@ def test_inference_pipeline_batch_transform(sagemaker_session, cpu_instance_type


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 @pytest.mark.skip(
     reason="This test has always failed, but the failure was masked by a bug. "
     "This test should be fixed. Details in https://github.com/aws/sagemaker-python-sdk/pull/968"
2 changes: 0 additions & 2 deletions tests/integ/test_mxnet_train.py
@@ -56,7 +56,6 @@ def mxnet_training_job(sagemaker_session, mxnet_full_version, cpu_instance_type)


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 def test_attach_deploy(mxnet_training_job, sagemaker_session, cpu_instance_type):
     endpoint_name = "test-mxnet-attach-deploy-{}".format(sagemaker_timestamp())

@@ -238,7 +237,6 @@ def test_deploy_model_with_update_non_existing_endpoint(


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 @pytest.mark.skipif(
     tests.integ.test_region() not in tests.integ.EI_SUPPORTED_REGIONS,
     reason="EI isn't supported in that specific region.",
1 change: 0 additions & 1 deletion tests/integ/test_neo_mxnet.py
@@ -55,7 +55,6 @@ def mxnet_training_job(sagemaker_session, cpu_instance_type):


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 def test_attach_deploy(
     mxnet_training_job, sagemaker_session, cpu_instance_type, cpu_instance_family
 ):
1 change: 0 additions & 1 deletion tests/integ/test_pytorch_train.py
@@ -48,7 +48,6 @@ def fixture_training_job(sagemaker_session, pytorch_full_version, cpu_instance_t


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 @pytest.mark.skipif(
     PYTHON_VERSION == "py2",
     reason="Python 2 is supported by PyTorch {} and lower versions.".format(LATEST_PY2_VERSION),
1 change: 0 additions & 1 deletion tests/integ/test_sklearn_train.py
@@ -101,7 +101,6 @@ def test_training_with_network_isolation(


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 @pytest.mark.skipif(PYTHON_VERSION != "py3", reason="Scikit-learn image supports only python 3.")
 @pytest.mark.skip(
     reason="This test has always failed, but the failure was masked by a bug. "
1 change: 0 additions & 1 deletion tests/integ/test_sparkml_serving.py
@@ -24,7 +24,6 @@


 @pytest.mark.canary_quick
-@pytest.mark.regional_testing
 @pytest.mark.skip(
     reason="This test has always failed, but the failure was masked by a bug. "
     "This test should be fixed. Details in https://github.com/aws/sagemaker-python-sdk/pull/968"
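A note on why the marks above could simply be deleted rather than registered: applying a mark that pytest does not know about produces a collection-time warning in pytest 4.5 and later. A minimal sketch of that behavior (a hypothetical test module, not part of this PR):

import pytest


# "regional_testing" is not declared under a `markers` entry in pytest.ini,
# setup.cfg, or tox.ini, so collecting this test emits
# PytestUnknownMarkWarning.
@pytest.mark.regional_testing
def test_example_with_unregistered_mark():
    assert True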
31 changes: 18 additions & 13 deletions tests/unit/sagemaker/automl/test_auto_ml.py
@@ -273,26 +273,30 @@ def test_auto_ml_invalid_input_data_format(sagemaker_session):
     )
     inputs = {}

-    expected_error_msg = "Cannot format input {}. Expecting one of str or list of strings."
-    with pytest.raises(ValueError, message=expected_error_msg.format(inputs)):
+    with pytest.raises(ValueError) as excinfo:
         AutoMLJob.start_new(auto_ml, inputs)
+
+    expected_error_msg = "Cannot format input {}. Expecting a string or a list of strings."
+    assert expected_error_msg.format(inputs) in str(excinfo.value)
+
     sagemaker_session.auto_ml.assert_not_called()


 def test_auto_ml_only_one_of_problem_type_and_job_objective_provided(sagemaker_session):
-    with pytest.raises(
-        ValueError,
-        message="One of problem type and objective metric provided. "
-        "Either both of them should be provided or none of "
-        "them should be provided.",
-    ):
+    with pytest.raises(ValueError) as excinfo:
         AutoML(
             role=ROLE,
             target_attribute_name=TARGET_ATTRIBUTE_NAME,
             sagemaker_session=sagemaker_session,
             problem_type=PROBLEM_TYPE,
         )
+
+    message = (
+        "One of problem type and objective metric provided. Either both of them "
+        "should be provided or none of them should be provided."
+    )
+    assert message in str(excinfo.value)


 @patch("sagemaker.automl.automl.AutoMLJob.start_new")
 def test_auto_ml_fit_set_logs_to_false(start_new, sagemaker_session, caplog):
@@ -637,15 +641,16 @@ def test_validate_and_update_inference_response():
 def test_validate_and_update_inference_response_wrong_input():
     cic = copy.copy(CLASSIFICATION_INFERENCE_CONTAINERS)

-    with pytest.raises(
-        ValueError,
-        message="Requested inference output keys [wrong_key, wrong_label] are unsupported. "
-        "The supported inference keys are [probability, probabilities, predicted_label, labels]",
-    ):
+    with pytest.raises(ValueError) as excinfo:
         AutoML.validate_and_update_inference_response(
             inference_containers=cic,
             inference_response_keys=["wrong_key", "wrong_label", "probabilities", "probability"],
         )
+    message = (
+        "Requested inference output keys [wrong_key, wrong_label] are unsupported. "
+        "The supported inference keys are [probability, probabilities, predicted_label, labels]"
+    )
+    assert message in str(excinfo.value)


 def test_create_model(sagemaker_session):
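For context on the pattern used above: newer pytest releases removed the message= keyword from pytest.raises; it only set the text shown when the expected exception was never raised, so it did not compare anything against the actual error message. A minimal sketch of the replacement, using a hypothetical helper rather than SDK code:

import pytest


def _format_input(data):
    # Hypothetical stand-in for the SDK call under test.
    raise ValueError(
        "Cannot format input {}. Expecting a string or a list of strings.".format(data)
    )


def test_error_message_is_actually_checked():
    with pytest.raises(ValueError) as excinfo:
        _format_input({})
    # str(excinfo.value) holds the raised message; pytest.raises(..., match=...)
    # is an equivalent regex-based shortcut.
    assert "Expecting a string or a list of strings" in str(excinfo.value)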
14 changes: 7 additions & 7 deletions tests/unit/test_image.py
@@ -370,7 +370,7 @@ def test_train(
         assert call_args[i] == v

     with open(docker_compose_file, "r") as f:
-        config = yaml.load(f)
+        config = yaml.load(f, Loader=yaml.SafeLoader)
     assert len(config["services"]) == instance_count
     for h in sagemaker_container.hosts:
         assert config["services"][h]["image"] == image
@@ -419,7 +419,7 @@ def test_train_with_hyperparameters_without_job_name(
     )

     with open(docker_compose_file, "r") as f:
-        config = yaml.load(f)
+        config = yaml.load(f, Loader=yaml.SafeLoader)
     for h in sagemaker_container.hosts:
         assert (
             "TRAINING_JOB_NAME={}".format(TRAINING_JOB_NAME)
@@ -491,7 +491,7 @@ def test_train_local_code(get_data_source_instance, tmpdir, sagemaker_session):
     shared_folder_path = os.path.join(sagemaker_container.container_root, "shared")

     with open(docker_compose_file, "r") as f:
-        config = yaml.load(f)
+        config = yaml.load(f, Loader=yaml.SafeLoader)
     assert len(config["services"]) == instance_count

     for h in sagemaker_container.hosts:
@@ -543,7 +543,7 @@ def test_train_local_intermediate_output(get_data_source_instance, tmpdir, sagem
     intermediate_folder_path = os.path.join(output_path, "output/intermediate")

     with open(docker_compose_file, "r") as f:
-        config = yaml.load(f)
+        config = yaml.load(f, Loader=yaml.SafeLoader)
     assert len(config["services"]) == instance_count
     for h in sagemaker_container.hosts:
         assert config["services"][h]["image"] == image
@@ -596,7 +596,7 @@ def test_serve(tmpdir, sagemaker_session):
     )

     with open(docker_compose_file, "r") as f:
-        config = yaml.load(f)
+        config = yaml.load(f, Loader=yaml.SafeLoader)

     for h in sagemaker_container.hosts:
         assert config["services"][h]["image"] == image
@@ -624,7 +624,7 @@ def test_serve_local_code(tmpdir, sagemaker_session):
     )

     with open(docker_compose_file, "r") as f:
-        config = yaml.load(f)
+        config = yaml.load(f, Loader=yaml.SafeLoader)

     for h in sagemaker_container.hosts:
         assert config["services"][h]["image"] == image
@@ -657,7 +657,7 @@ def test_serve_local_code_no_env(tmpdir, sagemaker_session):
     )

     with open(docker_compose_file, "r") as f:
-        config = yaml.load(f)
+        config = yaml.load(f, Loader=yaml.SafeLoader)

     for h in sagemaker_container.hosts:
         assert config["services"][h]["image"] == image
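The Loader=yaml.SafeLoader changes above address the YAMLLoadWarning that PyYAML 5.1+ emits when yaml.load is called without an explicit loader, since the legacy default loader can construct arbitrary Python objects from untrusted input. A minimal standalone sketch (not SDK code):

import yaml

DOC = """\
services:
  host-1:
    image: example-image
"""

# Passing an explicit SafeLoader silences the warning and restricts parsing
# to plain YAML data types (dicts, lists, strings, numbers, ...).
config = yaml.load(DOC, Loader=yaml.SafeLoader)

# yaml.safe_load is shorthand for the same thing.
assert config == yaml.safe_load(DOC)
assert config["services"]["host-1"]["image"] == "example-image"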
6 changes: 6 additions & 0 deletions tox.ini
@@ -51,6 +51,12 @@ ignore-path=.tox,src/sagemaker.egg-info
 # TODO: fix files before enabling max-line-length (D001)
 ignore=D001

+[pytest]
+markers =
+    canary_quick
+    cron
+    local_mode
+
 [testenv]
 passenv =
     AWS_ACCESS_KEY_ID
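Once a mark is declared under the [pytest] markers list above, pytest accepts it without warning and it can be used to select tests. A brief usage sketch (hypothetical test, standard pytest behavior):

import pytest


# canary_quick is registered in tox.ini, so applying it no longer triggers an
# unknown-mark warning; `pytest -m canary_quick` runs only tests carrying the
# mark, and `pytest -m "not canary_quick"` excludes them.
@pytest.mark.canary_quick
def test_quick_smoke():
    assert 1 + 1 == 2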