Skip to content

change: enable logging-format-interpolation pylint check #904

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged 2 commits on Jul 5, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .pylintrc
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,6 @@ disable=
too-many-instance-attributes,
line-too-long, # We let Flake8 take care of this # TODO: Fix these and stop relying on flake8
len-as-condition, # TODO: Enable this check once pylint 2.4.0 is released and consumed due to the fix in https://github.com/PyCQA/pylint/issues/2684
logging-format-interpolation, # TODO: Fix logging so as to remove this.
import-error, # TODO: Fix import errors
logging-not-lazy, # TODO: Fix logging
attribute-defined-outside-init, # TODO: Fix scope
Expand Down
6 changes: 3 additions & 3 deletions src/sagemaker/amazon/amazon_estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,11 +195,11 @@ def record_set(self, train, labels=None, channel="train", encrypt=False):
bucket, key_prefix = parsed_s3_url.netloc, parsed_s3_url.path
key_prefix = key_prefix + "{}-{}/".format(type(self).__name__, sagemaker_timestamp())
key_prefix = key_prefix.lstrip("/")
logger.debug("Uploading to bucket {} and key_prefix {}".format(bucket, key_prefix))
logger.debug("Uploading to bucket %s and key_prefix %s", bucket, key_prefix)
manifest_s3_file = upload_numpy_to_s3_shards(
self.train_instance_count, s3, bucket, key_prefix, train, labels, encrypt
)
logger.debug("Created manifest file {}".format(manifest_s3_file))
logger.debug("Created manifest file %s", manifest_s3_file)
return RecordSet(
manifest_s3_file,
num_records=train.shape[0],
Expand Down Expand Up @@ -279,7 +279,7 @@ def upload_numpy_to_s3_shards(
shard_index_string = str(shard_index).zfill(len(str(len(shards))))
file_name = "matrix_{}.pbr".format(shard_index_string)
key = key_prefix + file_name
logger.debug("Creating object {} in bucket {}".format(key, bucket))
logger.debug("Creating object %s in bucket %s", key, bucket)
s3.Object(bucket, key).put(Body=file, **extra_put_kwargs)
uploaded_files.append(file_name)
manifest_key = key_prefix + ".amazon.manifest"
Expand Down
8 changes: 4 additions & 4 deletions src/sagemaker/cli/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,9 +108,9 @@ def start(self):
data_url = self.upload_training_data()
estimator = self.create_estimator()
estimator.fit(data_url)
logger.debug("code location: {}".format(estimator.uploaded_code.s3_prefix))
logger.debug("code location: %s", estimator.uploaded_code.s3_prefix)
logger.debug(
"model location: {}{}/output/model.tar.gz".format(
estimator.output_path, estimator._current_job_name
)
"model location: %s%s/output/model.tar.gz",
estimator.output_path,
estimator._current_job_name,
)
2 changes: 1 addition & 1 deletion src/sagemaker/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ def configure_logging(args):
def main():
args = parse_arguments(sys.argv[1:])
configure_logging(args)
logger.debug("args: {}".format(args))
logger.debug("args: %s", args)
args.func(args)


Expand Down
5 changes: 2 additions & 3 deletions src/sagemaker/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -687,9 +687,8 @@ def start_new(cls, estimator, inputs):
if isinstance(inputs, s3_input):
if "InputMode" in inputs.config:
logging.debug(
"Selecting s3_input's input_mode ({}) for TrainingInputMode.".format(
inputs.config["InputMode"]
)
"Selecting s3_input's input_mode (%s) for TrainingInputMode.",
inputs.config["InputMode"],
)
train_args["input_mode"] = inputs.config["InputMode"]

Expand Down
10 changes: 5 additions & 5 deletions src/sagemaker/local/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def serve(self, model_dir, environment):
logger.info("serving")

self.container_root = self._create_tmp_folder()
logger.info("creating hosting dir in {}".format(self.container_root))
logger.info("creating hosting dir in %s", self.container_root)

volumes = self._prepare_serving_volumes(model_dir)

Expand Down Expand Up @@ -424,7 +424,7 @@ def _generate_compose_file(self, command, additional_volumes=None, additional_en

docker_compose_path = os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME)
yaml_content = yaml.dump(content, default_flow_style=False)
logger.info("docker compose file: \n{}".format(yaml_content))
logger.info("docker compose file: \n%s", yaml_content)
with open(docker_compose_path, "w") as f:
f.write(yaml_content)

Expand All @@ -445,7 +445,7 @@ def _compose(self, detached=False):
if detached:
command.append("-d")

logger.info("docker command: {}".format(" ".join(command)))
logger.info("docker command: %s", " ".join(command))
return command

def _create_docker_host(self, host, environment, optml_subdirs, command, volumes):
Expand Down Expand Up @@ -739,7 +739,7 @@ def _ecr_login_if_needed(boto_session, image):

def _pull_image(image):
pull_image_command = ("docker pull %s" % image).strip()
logger.info("docker command: {}".format(pull_image_command))
logger.info("docker command: %s", pull_image_command)

subprocess.check_output(pull_image_command, shell=True)
logger.info("image pulled: {}".format(image))
logger.info("image pulled: %s", image)
5 changes: 3 additions & 2 deletions src/sagemaker/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,8 +298,9 @@ def compile(
self._is_compiled_model = True
else:
LOGGER.warning(
"The intance type {} is not supported to deploy via SageMaker,"
"please deploy the model on the device by yourself.".format(target_instance_family)
"The instance type %s is not supported to deploy via SageMaker,"
"please deploy the model manually.",
target_instance_family,
)
return self

Expand Down
5 changes: 2 additions & 3 deletions src/sagemaker/rl/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -363,9 +363,8 @@ def _validate_images_args(cls, toolkit, toolkit_version, framework, image_name):
if found_args:
logger.warning(
"Parameter `image_name` is specified, "
"`{}` are going to be ignored when choosing the image.".format(
"`, `".join(found_args)
)
"`%s` are going to be ignored when choosing the image.",
"`, `".join(found_args),
)

@classmethod
Expand Down
50 changes: 24 additions & 26 deletions src/sagemaker/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ def default_bucket(self):
Bucket=default_bucket, CreateBucketConfiguration={"LocationConstraint": region}
)

LOGGER.info("Created S3 bucket: {}".format(default_bucket))
LOGGER.info("Created S3 bucket: %s", default_bucket)
except ClientError as e:
error_code = e.response["Error"]["Code"]
message = e.response["Error"]["Message"]
Expand Down Expand Up @@ -343,8 +343,8 @@ def train( # noqa: C901
if encrypt_inter_container_traffic:
train_request["EnableInterContainerTrafficEncryption"] = encrypt_inter_container_traffic

LOGGER.info("Creating training-job with name: {}".format(job_name))
LOGGER.debug("train request: {}".format(json.dumps(train_request, indent=4)))
LOGGER.info("Creating training-job with name: %s", job_name)
LOGGER.debug("train request: %s", json.dumps(train_request, indent=4))
self.sagemaker_client.create_training_job(**train_request)

def compile_model(
Expand Down Expand Up @@ -379,7 +379,7 @@ def compile_model(
if tags is not None:
compilation_job_request["Tags"] = tags

LOGGER.info("Creating compilation-job with name: {}".format(job_name))
LOGGER.info("Creating compilation-job with name: %s", job_name)
self.sagemaker_client.create_compilation_job(**compilation_job_request)

def tune(
Expand Down Expand Up @@ -521,8 +521,8 @@ def tune(
if encrypt_inter_container_traffic:
tune_request["TrainingJobDefinition"]["EnableInterContainerTrafficEncryption"] = True

LOGGER.info("Creating hyperparameter tuning job with name: {}".format(job_name))
LOGGER.debug("tune request: {}".format(json.dumps(tune_request, indent=4)))
LOGGER.info("Creating hyperparameter tuning job with name: %s", job_name)
LOGGER.debug("tune request: %s", json.dumps(tune_request, indent=4))
self.sagemaker_client.create_hyper_parameter_tuning_job(**tune_request)

def stop_tuning_job(self, name):
Expand All @@ -535,18 +535,17 @@ def stop_tuning_job(self, name):
ClientError: If an error occurs while trying to stop the hyperparameter tuning job.
"""
try:
LOGGER.info("Stopping tuning job: {}".format(name))
LOGGER.info("Stopping tuning job: %s", name)
self.sagemaker_client.stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
except ClientError as e:
error_code = e.response["Error"]["Code"]
# allow to pass if the job already stopped
if error_code == "ValidationException":
LOGGER.info("Tuning job: {} is already stopped or not running.".format(name))
LOGGER.info("Tuning job: %s is already stopped or not running.", name)
else:
LOGGER.error(
"Error occurred while attempting to stop tuning job: {}. Please try again.".format(
name
)
"Error occurred while attempting to stop tuning job: %s. Please try again.",
name,
)
raise

Expand Down Expand Up @@ -608,8 +607,8 @@ def transform(
if data_processing is not None:
transform_request["DataProcessing"] = data_processing

LOGGER.info("Creating transform job with name: {}".format(job_name))
LOGGER.debug("Transform request: {}".format(json.dumps(transform_request, indent=4)))
LOGGER.info("Creating transform job with name: %s", job_name)
LOGGER.debug("Transform request: %s", json.dumps(transform_request, indent=4))
self.sagemaker_client.create_transform_job(**transform_request)

def create_model(
Expand Down Expand Up @@ -681,8 +680,8 @@ def create_model(
if enable_network_isolation:
create_model_request["EnableNetworkIsolation"] = True

LOGGER.info("Creating model with name: {}".format(name))
LOGGER.debug("CreateModel request: {}".format(json.dumps(create_model_request, indent=4)))
LOGGER.info("Creating model with name: %s", name)
LOGGER.debug("CreateModel request: %s", json.dumps(create_model_request, indent=4))

try:
self.sagemaker_client.create_model(**create_model_request)
Expand All @@ -694,7 +693,7 @@ def create_model(
error_code == "ValidationException"
and "Cannot create already existing model" in message
):
LOGGER.warning("Using already existing model: {}".format(name))
LOGGER.warning("Using already existing model: %s", name)
else:
raise

Expand Down Expand Up @@ -765,14 +764,14 @@ def create_model_package_from_algorithm(self, name, description, algorithm_arn,
},
}
try:
LOGGER.info("Creating model package with name: {}".format(name))
LOGGER.info("Creating model package with name: %s", name)
self.sagemaker_client.create_model_package(**request)
except ClientError as e:
error_code = e.response["Error"]["Code"]
message = e.response["Error"]["Message"]

if error_code == "ValidationException" and "ModelPackage already exists" in message:
LOGGER.warning("Using already existing model package: {}".format(name))
LOGGER.warning("Using already existing model package: %s", name)
else:
raise

Expand Down Expand Up @@ -833,7 +832,7 @@ def create_endpoint_config(
Returns:
str: Name of the endpoint point configuration created.
"""
LOGGER.info("Creating endpoint-config with name {}".format(name))
LOGGER.info("Creating endpoint-config with name %s", name)

tags = tags or []

Expand Down Expand Up @@ -872,7 +871,7 @@ def create_endpoint(self, endpoint_name, config_name, tags=None, wait=True):
Returns:
str: Name of the Amazon SageMaker ``Endpoint`` created.
"""
LOGGER.info("Creating endpoint with name {}".format(endpoint_name))
LOGGER.info("Creating endpoint with name %s", endpoint_name)

tags = tags or []

Expand Down Expand Up @@ -915,7 +914,7 @@ def delete_endpoint(self, endpoint_name):
Args:
endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to delete.
"""
LOGGER.info("Deleting endpoint with name: {}".format(endpoint_name))
LOGGER.info("Deleting endpoint with name: %s", endpoint_name)
self.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)

def delete_endpoint_config(self, endpoint_config_name):
Expand All @@ -924,7 +923,7 @@ def delete_endpoint_config(self, endpoint_config_name):
Args:
endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to delete.
"""
LOGGER.info("Deleting endpoint configuration with name: {}".format(endpoint_config_name))
LOGGER.info("Deleting endpoint configuration with name: %s", endpoint_config_name)
self.sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)

def delete_model(self, model_name):
Expand All @@ -934,7 +933,7 @@ def delete_model(self, model_name):
model_name (str): Name of the Amazon SageMaker model to delete.

"""
LOGGER.info("Deleting model with name: {}".format(model_name))
LOGGER.info("Deleting model with name: %s", model_name)
self.sagemaker_client.delete_model(ModelName=model_name)

def wait_for_job(self, job, poll=5):
Expand Down Expand Up @@ -1258,9 +1257,8 @@ def get_caller_identity_arn(self):
role = self.boto_session.client("iam").get_role(RoleName=role_name)["Role"]["Arn"]
except ClientError:
LOGGER.warning(
"Couldn't call 'get_role' to get Role ARN from role name {} to get Role path.".format(
role_name
)
"Couldn't call 'get_role' to get Role ARN from role name %s to get Role path.",
role_name,
)

return role
Expand Down
6 changes: 3 additions & 3 deletions src/sagemaker/tensorflow/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@
)
_SCRIPT_MODE_TENSORBOARD_WARNING = (
"Tensorboard is not supported with script mode. You can run the following "
"command: tensorboard --logdir {} --host localhost --port 6006 This can be "
"command: tensorboard --logdir %s --host localhost --port 6006 This can be "
"run from anywhere with access to the S3 URI used as the logdir."
)

Expand Down Expand Up @@ -173,7 +173,7 @@ def run(self):
"""Run TensorBoard process."""
port, tensorboard_process = self.create_tensorboard_process()

logger.info("TensorBoard 0.1.7 at http://localhost:{}".format(port))
logger.info("TensorBoard 0.1.7 at http://localhost:%s", port)
while not self.estimator.checkpoint_path:
self.event.wait(1)
with self._temporary_directory() as aws_sync_dir:
Expand Down Expand Up @@ -388,7 +388,7 @@ def fit_super():
raise ValueError("Tensorboard is not supported with async fit")

if self._script_mode_enabled() and run_tensorboard_locally:
logger.warning(_SCRIPT_MODE_TENSORBOARD_WARNING.format(self.model_dir))
logger.warning(_SCRIPT_MODE_TENSORBOARD_WARNING, self.model_dir)
fit_super()
elif run_tensorboard_locally:
tensorboard = Tensorboard(self)
Expand Down
5 changes: 2 additions & 3 deletions src/sagemaker/tuner.py
Original file line number Diff line number Diff line change
Expand Up @@ -734,9 +734,8 @@ def start_new(cls, tuner, inputs):
if isinstance(inputs, s3_input):
if "InputMode" in inputs.config:
logging.debug(
"Selecting s3_input's input_mode ({}) for TrainingInputMode.".format(
inputs.config["InputMode"]
)
"Selecting s3_input's input_mode (%s) for TrainingInputMode.",
inputs.config["InputMode"],
)
tuner_args["input_mode"] = inputs.config["InputMode"]

Expand Down