Skip to content

change Using logging instead of prints #4133

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Dec 21, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion src/sagemaker/base_predictor.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,9 @@
LOGGER = logging.getLogger("sagemaker")


logger = logging.getLogger(__name__)


class PredictorBase(abc.ABC):
"""An object that encapsulates a deployed model."""

Expand Down Expand Up @@ -714,7 +717,7 @@ def list_monitors(self):
endpoint_name=self.endpoint_name
)
if len(monitoring_schedules_dict["MonitoringScheduleSummaries"]) == 0:
print("No monitors found for endpoint. endpoint: {}".format(self.endpoint_name))
logger.debug("No monitors found for endpoint. endpoint: %s", self.endpoint_name)
return []

monitors = []
Expand Down
5 changes: 3 additions & 2 deletions src/sagemaker/cli/compatibility/v2/files.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@

from sagemaker.cli.compatibility.v2.ast_transformer import ASTTransformer

LOGGER = logging.getLogger(__name__)
# Setting LOGGER for backward compatibility, in case users import this...
logger = LOGGER = logging.getLogger(__name__)


class FileUpdater(object):
Expand Down Expand Up @@ -59,7 +60,7 @@ def _make_output_dirs_if_needed(self):
os.makedirs(output_dir)

if os.path.exists(self.output_path):
LOGGER.warning("Overwriting file %s", self.output_path)
logger.warning("Overwriting file %s", self.output_path)


class PyFileUpdater(FileUpdater):
Expand Down
2 changes: 1 addition & 1 deletion src/sagemaker/fw_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,7 +474,7 @@ def tar_and_upload_dir(
if s3_resource is None:
s3_resource = session.resource("s3", region_name=session.region_name)
else:
print("Using provided s3_resource")
logger.debug("Using provided s3_resource")

s3_resource.Object(bucket, key).upload_file(tar_file, ExtraArgs=extra_args)
finally:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@
"mxnet": "MXNET",
}

LOGGER = logging.getLogger("sagemaker")
# Setting LOGGER for backward compatibility, in case users import it...
logger = LOGGER = logging.getLogger("sagemaker")


class Phase:
Expand Down Expand Up @@ -145,10 +146,10 @@ def right_size(
)

if endpoint_configurations or traffic_pattern or stopping_conditions or resource_limit:
LOGGER.info("Advanced Job parameters were specified. Running Advanced job...")
logger.info("Advanced Job parameters were specified. Running Advanced job...")
job_type = "Advanced"
else:
LOGGER.info("Advanced Job parameters were not specified. Running Default job...")
logger.info("Advanced Job parameters were not specified. Running Default job...")
job_type = "Default"

self._init_sagemaker_session_if_does_not_exist()
Expand All @@ -173,7 +174,7 @@ def right_size(
vpc_config=self.vpc_config,
enable_network_isolation=self.enable_network_isolation(),
)
LOGGER.warning("Attempting to create new model with name %s", self.name)
logger.warning("Attempting to create new model with name %s", self.name)
self.sagemaker_session.create_model(**create_model_args)

ret_name = self.sagemaker_session.create_inference_recommendations_job(
Expand Down Expand Up @@ -281,23 +282,23 @@ def _update_params_for_right_size(
if accelerator_type:
raise ValueError("accelerator_type is not compatible with right_size().")
if instance_type or initial_instance_count:
LOGGER.warning(
logger.warning(
"instance_type or initial_instance_count specified."
"Overriding right_size() recommendations."
)
return None
if async_inference_config:
LOGGER.warning(
logger.warning(
"async_inference_config is specified. Overriding right_size() recommendations."
)
return None
if serverless_inference_config:
LOGGER.warning(
logger.warning(
"serverless_inference_config is specified. Overriding right_size() recommendations."
)
return None
if explainer_config:
LOGGER.warning(
logger.warning(
"explainer_config is specified. Overriding right_size() recommendations."
)
return None
Expand Down Expand Up @@ -359,7 +360,7 @@ def _update_params_for_recommendation_id(
"""

if instance_type is not None and initial_instance_count is not None:
LOGGER.warning(
logger.warning(
"Both instance_type and initial_instance_count are specified,"
"overriding the recommendation result."
)
Expand Down
21 changes: 12 additions & 9 deletions src/sagemaker/local/entities.py
Original file line number Diff line number Diff line change
Expand Up @@ -683,8 +683,10 @@ def start(self, **kwargs):
)

self._executions[execution_id] = execution
print(
f"Starting execution for pipeline {self.pipeline.name}. Execution ID is {execution_id}"
logger.info(
"Starting execution for pipeline %s. Execution ID is %s",
self.pipeline.name,
execution_id,
)
self.last_modified_time = datetime.datetime.now().timestamp()

Expand Down Expand Up @@ -771,31 +773,32 @@ def update_execution_success(self):
"""Mark execution as succeeded."""
self.status = _LocalExecutionStatus.SUCCEEDED.value
self.last_modified_time = datetime.datetime.now().timestamp()
print(f"Pipeline execution {self.pipeline_execution_name} SUCCEEDED")
logger.info("Pipeline execution %s SUCCEEDED", self.pipeline_execution_name)

def update_execution_failure(self, step_name, failure_message):
"""Mark execution as failed."""
self.status = _LocalExecutionStatus.FAILED.value
self.failure_reason = f"Step '{step_name}' failed with message: {failure_message}"
self.last_modified_time = datetime.datetime.now().timestamp()
print(
f"Pipeline execution {self.pipeline_execution_name} FAILED because step "
f"'{step_name}' failed."
logger.info(
"Pipeline execution %s FAILED because step '%s' failed.",
self.pipeline_execution_name,
step_name,
)

def update_step_properties(self, step_name, step_properties):
"""Update pipeline step execution output properties."""
self.step_execution.get(step_name).update_step_properties(step_properties)
print(f"Pipeline step '{step_name}' SUCCEEDED.")
logger.info("Pipeline step '%s' SUCCEEDED.", step_name)

def update_step_failure(self, step_name, failure_message):
"""Mark step_name as failed."""
print(f"Pipeline step '{step_name}' FAILED. Failure message is: {failure_message}")
logger.info("Pipeline step '%s' FAILED. Failure message is: %s", step_name, failure_message)
self.step_execution.get(step_name).update_step_failure(failure_message)

def mark_step_executing(self, step_name):
"""Update pipelines step's status to EXECUTING and start_time to now."""
print(f"Starting pipeline step: '{step_name}'")
logger.info("Starting pipeline step: '%s'", step_name)
self.step_execution.get(step_name).mark_step_executing()

def _initialize_step_execution(self, steps):
Expand Down
4 changes: 2 additions & 2 deletions src/sagemaker/local/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,7 @@ def process(

# Print our Job Complete line to have a similar experience to training on SageMaker where
# you see this line at the end.
print("===== Job Complete =====")
logger.info("===== Job Complete =====")

def train(self, input_data_config, output_data_config, hyperparameters, environment, job_name):
"""Run a training job locally using docker-compose.
Expand Down Expand Up @@ -310,7 +310,7 @@ def train(self, input_data_config, output_data_config, hyperparameters, environm

# Print our Job Complete line to have a similar experience to training on SageMaker where
# you see this line at the end.
print("===== Job Complete =====")
logger.info("===== Job Complete =====")
return artifacts

def serve(self, model_dir, environment):
Expand Down
11 changes: 6 additions & 5 deletions src/sagemaker/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,8 @@
from sagemaker.enums import EndpointType
from sagemaker.session import get_add_model_package_inference_args

LOGGER = logging.getLogger("sagemaker")
# Setting LOGGER for backward compatibility, in case users import it...
logger = LOGGER = logging.getLogger("sagemaker")

NEO_ALLOWED_FRAMEWORKS = set(
["mxnet", "tensorflow", "keras", "pytorch", "onnx", "xgboost", "tflite"]
Expand Down Expand Up @@ -737,7 +738,7 @@ def _upload_code(self, key_prefix: str, repack: bool = False) -> None:
script_name=os.path.basename(self.entry_point),
)

LOGGER.info(
logger.info(
"Repacking model artifact (%s), script artifact "
"(%s), and dependencies (%s) "
"into single tar.gz file located at %s. "
Expand Down Expand Up @@ -1258,13 +1259,13 @@ def compile(
self.image_uri = job_status.get("InferenceImage", None)
self._is_compiled_model = True
else:
LOGGER.warning(
logger.warning(
"The instance type %s is not supported for deployment via SageMaker."
"Please deploy the model manually.",
target_instance_family,
)
else:
LOGGER.warning(
logger.warning(
"Devices described by Target Platform OS, Architecture and Accelerator are not"
"supported for deployment via SageMaker. Please deploy the model manually."
)
Expand Down Expand Up @@ -1484,7 +1485,7 @@ def deploy(
and instance_type.startswith("ml.inf")
and not self._is_compiled_model
):
LOGGER.warning(
logger.warning(
"Your model is not compiled. Please compile your model before using Inferentia."
)

Expand Down
37 changes: 19 additions & 18 deletions src/sagemaker/model_monitor/clarify_model_monitoring.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@
from sagemaker.clarify import SageMakerClarifyProcessor, ModelPredictedLabelConfig
from sagemaker.lineage._utils import get_resource_name_from_arn

_LOGGER = logging.getLogger(__name__)
# Setting _LOGGER for backward compatibility, in case users import it...
logger = _LOGGER = logging.getLogger(__name__)


class ClarifyModelMonitor(mm.ModelMonitor):
Expand Down Expand Up @@ -223,7 +224,7 @@ def _upload_analysis_config(self, analysis_config, output_s3_uri, job_definition
str(uuid.uuid4()),
"analysis_config.json",
)
_LOGGER.info("Uploading analysis config to {s3_uri}.")
logger.info("Uploading analysis config to {s3_uri}.")
return s3.S3Uploader.upload_string_as_file_body(
json.dumps(analysis_config),
desired_s3_uri=s3_uri,
Expand Down Expand Up @@ -604,7 +605,7 @@ def create_monitoring_schedule(
"Monitoring Schedule. To create another, first delete the existing one "
"using my_monitor.delete_monitoring_schedule()."
)
_LOGGER.error(message)
logger.error(message)
raise ValueError(message)

if (batch_transform_input is not None) ^ (endpoint_input is None):
Expand All @@ -613,7 +614,7 @@ def create_monitoring_schedule(
"Amazon Model Monitoring Schedule. "
"Please provide only one of the above required inputs"
)
_LOGGER.error(message)
logger.error(message)
raise ValueError(message)

self._check_monitoring_schedule_cron_validity(
Expand Down Expand Up @@ -667,15 +668,15 @@ def create_monitoring_schedule(
self.job_definition_name = new_job_definition_name
self.monitoring_schedule_name = monitor_schedule_name
except Exception:
_LOGGER.exception("Failed to create monitoring schedule.")
logger.exception("Failed to create monitoring schedule.")
# noinspection PyBroadException
try:
self.sagemaker_session.sagemaker_client.delete_model_bias_job_definition(
JobDefinitionName=new_job_definition_name
)
except Exception: # pylint: disable=W0703
message = "Failed to delete job definition {}.".format(new_job_definition_name)
_LOGGER.exception(message)
logger.exception(message)
raise

# noinspection PyMethodOverriding
Expand Down Expand Up @@ -756,7 +757,7 @@ def update_monitoring_schedule(
"Amazon Model Monitoring Schedule. "
"Please provide only one of the above required inputs"
)
_LOGGER.error(message)
logger.error(message)
raise ValueError(message)

# Only need to update schedule expression
Expand Down Expand Up @@ -820,15 +821,15 @@ def update_monitoring_schedule(
if network_config is not None:
self.network_config = network_config
except Exception:
_LOGGER.exception("Failed to update monitoring schedule.")
logger.exception("Failed to update monitoring schedule.")
# noinspection PyBroadException
try:
self.sagemaker_session.sagemaker_client.delete_model_bias_job_definition(
JobDefinitionName=new_job_definition_name
)
except Exception: # pylint: disable=W0703
message = "Failed to delete job definition {}.".format(new_job_definition_name)
_LOGGER.exception(message)
logger.exception(message)
raise

def delete_monitoring_schedule(self):
Expand All @@ -838,7 +839,7 @@ def delete_monitoring_schedule(self):
message = "Deleting Model Bias Job Definition with name: {}".format(
self.job_definition_name
)
_LOGGER.info(message)
logger.info(message)
self.sagemaker_session.sagemaker_client.delete_model_bias_job_definition(
JobDefinitionName=self.job_definition_name
)
Expand Down Expand Up @@ -1045,7 +1046,7 @@ def create_monitoring_schedule(
"Monitoring Schedule. To create another, first delete the existing one "
"using my_monitor.delete_monitoring_schedule()."
)
_LOGGER.error(message)
logger.error(message)
raise ValueError(message)

if (batch_transform_input is not None) ^ (endpoint_input is None):
Expand All @@ -1054,7 +1055,7 @@ def create_monitoring_schedule(
"Amazon Model Monitoring Schedule."
"Please provide only one of the above required inputs"
)
_LOGGER.error(message)
logger.error(message)
raise ValueError(message)

self._check_monitoring_schedule_cron_validity(
Expand Down Expand Up @@ -1107,15 +1108,15 @@ def create_monitoring_schedule(
self.job_definition_name = new_job_definition_name
self.monitoring_schedule_name = monitor_schedule_name
except Exception:
_LOGGER.exception("Failed to create monitoring schedule.")
logger.exception("Failed to create monitoring schedule.")
# noinspection PyBroadException
try:
self.sagemaker_session.sagemaker_client.delete_model_explainability_job_definition(
JobDefinitionName=new_job_definition_name
)
except Exception: # pylint: disable=W0703
message = "Failed to delete job definition {}.".format(new_job_definition_name)
_LOGGER.exception(message)
logger.exception(message)
raise

# noinspection PyMethodOverriding
Expand Down Expand Up @@ -1198,7 +1199,7 @@ def update_monitoring_schedule(
"Amazon Model Monitoring Schedule. "
"Please provide only one of the above required inputs"
)
_LOGGER.error(message)
logger.error(message)
raise ValueError(message)

# Only need to update schedule expression
Expand Down Expand Up @@ -1265,15 +1266,15 @@ def update_monitoring_schedule(
if network_config is not None:
self.network_config = network_config
except Exception:
_LOGGER.exception("Failed to update monitoring schedule.")
logger.exception("Failed to update monitoring schedule.")
# noinspection PyBroadException
try:
self.sagemaker_session.sagemaker_client.delete_model_explainability_job_definition(
JobDefinitionName=new_job_definition_name
)
except Exception: # pylint: disable=W0703
message = "Failed to delete job definition {}.".format(new_job_definition_name)
_LOGGER.exception(message)
logger.exception(message)
raise

def delete_monitoring_schedule(self):
Expand All @@ -1283,7 +1284,7 @@ def delete_monitoring_schedule(self):
message = "Deleting Model Explainability Job Definition with name: {}".format(
self.job_definition_name
)
_LOGGER.info(message)
logger.info(message)
self.sagemaker_session.sagemaker_client.delete_model_explainability_job_definition(
JobDefinitionName=self.job_definition_name
)
Expand Down
Loading