
Commit 3e7a516

Revert "change: update master from dev (#2836)"
This reverts commit be1eea6.
1 parent c2b5f95 commit 3e7a516

26 files changed (+99, -773 lines)

.gitignore

Lines changed: 1 addition & 2 deletions
@@ -27,5 +27,4 @@ venv/
 *.swp
 .docker/
 env/
-.vscode/
-.python-version
+.vscode/

doc/workflows/pipelines/sagemaker.workflow.pipelines.rst

Lines changed: 0 additions & 6 deletions
@@ -82,12 +82,6 @@ Pipeline
 .. autoclass:: sagemaker.workflow.pipeline._PipelineExecution
     :members:
 
-Parallelism Configuration
--------------------------
-
-.. autoclass:: sagemaker.workflow.parallelism_config.ParallelismConfiguration
-    :members:
-
 Pipeline Experiment Config
 --------------------------

src/sagemaker/clarify.py

Lines changed: 5 additions & 9 deletions
@@ -290,15 +290,11 @@ def __init__(
             probability_threshold (float): An optional value for binary prediction tasks in which
                 the model returns a probability, to indicate the threshold to convert the
                 prediction to a boolean value. Default is 0.5.
-            label_headers (list[str]): List of headers, each for a predicted score in model output.
-                For bias analysis, it is used to extract the label value with the highest score as
-                predicted label. For explainability job, It is used to beautify the analysis report
-                by replacing placeholders like "label0".
+            label_headers (list): List of label values - one for each score of the ``probability``.
         """
         self.label = label
         self.probability = probability
         self.probability_threshold = probability_threshold
-        self.label_headers = label_headers
         if probability_threshold is not None:
             try:
                 float(probability_threshold)
@@ -1064,10 +1060,10 @@ def run_explainability(
             explainability_config (:class:`~sagemaker.clarify.ExplainabilityConfig` or list):
                 Config of the specific explainability method or a list of ExplainabilityConfig
                 objects. Currently, SHAP and PDP are the two methods supported.
-            model_scores (int or str or :class:`~sagemaker.clarify.ModelPredictedLabelConfig`):
-                Index or JSONPath to locate the predicted scores in the model output. This is not
-                required if the model output is a single score. Alternatively, it can be an instance
-                of ModelPredictedLabelConfig to provide more parameters like label_headers.
+            model_scores(str|int|ModelPredictedLabelConfig): Index or JSONPath location in the
+                model output for the predicted scores to be explained. This is not required if the
+                model output is a single score. Alternatively, an instance of
+                ModelPredictedLabelConfig can be provided.
             wait (bool): Whether the call should wait until the job completes (default: True).
             logs (bool): Whether to show the logs produced by the job.
                 Only meaningful when ``wait`` is True (default: True).
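
For context, a minimal sketch of how ``run_explainability`` is typically called with a ``ModelPredictedLabelConfig``; the role ARN, bucket paths, and model name below are placeholders, not values from this commit, and after this revert ``label_headers`` is no longer stored on the config.

    from sagemaker.clarify import (
        DataConfig,
        ModelConfig,
        ModelPredictedLabelConfig,
        SageMakerClarifyProcessor,
        SHAPConfig,
    )

    # Placeholder role/bucket/model values for illustration only.
    processor = SageMakerClarifyProcessor(
        role="arn:aws:iam::111122223333:role/example-role",
        instance_count=1,
        instance_type="ml.m5.xlarge",
    )

    data_config = DataConfig(
        s3_data_input_path="s3://example-bucket/clarify/input",
        s3_output_path="s3://example-bucket/clarify/output",
        label="target",
        headers=["feature_0", "feature_1", "target"],
        dataset_type="text/csv",
    )

    model_config = ModelConfig(
        model_name="example-model",
        instance_count=1,
        instance_type="ml.m5.xlarge",
        accept_type="text/csv",
    )

    # After this revert, only label/probability/probability_threshold are kept
    # on ModelPredictedLabelConfig; label_headers is no longer stored.
    model_scores = ModelPredictedLabelConfig(probability="score", probability_threshold=0.8)

    shap_config = SHAPConfig(baseline=[[0.5, 0.5]], num_samples=100, agg_method="mean_abs")

    processor.run_explainability(
        data_config=data_config,
        model_config=model_config,
        explainability_config=shap_config,
        model_scores=model_scores,
    )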

src/sagemaker/estimator.py

Lines changed: 0 additions & 1 deletion
@@ -2343,7 +2343,6 @@ def _stage_user_code_in_s3(self):
             dependencies=self.dependencies,
             kms_key=kms_key,
             s3_resource=self.sagemaker_session.s3_resource,
-            settings=self.sagemaker_session.settings,
         )
 
     def _model_source_dir(self):

src/sagemaker/fw_utils.py

Lines changed: 1 addition & 25 deletions
@@ -19,10 +19,8 @@
 import shutil
 import tempfile
 from collections import namedtuple
-from typing import Optional
 
 import sagemaker.image_uris
-from sagemaker.session_settings import SessionSettings
 import sagemaker.utils
 
 from sagemaker.deprecations import renamed_warning
@@ -75,20 +73,7 @@
         "2.6.0",
         "2.6.2",
     ],
-    "pytorch": [
-        "1.6",
-        "1.6.0",
-        "1.7",
-        "1.7.1",
-        "1.8",
-        "1.8.0",
-        "1.8.1",
-        "1.9",
-        "1.9.0",
-        "1.9.1",
-        "1.10",
-        "1.10.0",
-    ],
+    "pytorch": ["1.6", "1.6.0", "1.7", "1.7.1", "1.8", "1.8.0", "1.8.1", "1.9", "1.9.0", "1.9.1"],
 }
 SMDISTRIBUTED_SUPPORTED_STRATEGIES = ["dataparallel", "modelparallel"]
 
@@ -218,7 +203,6 @@ def tar_and_upload_dir(
     dependencies=None,
     kms_key=None,
     s3_resource=None,
-    settings: Optional[SessionSettings] = None,
 ):
     """Package source files and upload a compress tar file to S3.
 
@@ -246,9 +230,6 @@
         s3_resource (boto3.resource("s3")): Optional. Pre-instantiated Boto3 Resource
             for S3 connections, can be used to customize the configuration,
             e.g. set the endpoint URL (default: None).
-        settings (sagemaker.session_settings.SessionSettings): Optional. The settings
-            of the SageMaker ``Session``, can be used to override the default encryption
-            behavior (default: None).
     Returns:
         sagemaker.fw_utils.UserCode: An object with the S3 bucket and key (S3 prefix) and
             script name.
@@ -260,7 +241,6 @@
     dependencies = dependencies or []
     key = "%s/sourcedir.tar.gz" % s3_key_prefix
     tmp = tempfile.mkdtemp()
-    encrypt_artifact = True if settings is None else settings.encrypt_repacked_artifacts
 
     try:
         source_files = _list_files_to_compress(script, directory) + dependencies
@@ -270,10 +250,6 @@
 
     if kms_key:
        extra_args = {"ServerSideEncryption": "aws:kms", "SSEKMSKeyId": kms_key}
-    elif encrypt_artifact:
-        # encrypt the tarball at rest in S3 with the default AWS managed KMS key for S3
-        # see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#API_PutObject_RequestSyntax
-        extra_args = {"ServerSideEncryption": "aws:kms"}
     else:
         extra_args = None
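
The hunk above reverts the default server-side encryption of the sourcedir tarball. As a rough standalone sketch (not the library's own code), the ExtraArgs selection differs before and after the revert roughly as follows; ``encrypt_by_default`` stands in for the removed ``SessionSettings.encrypt_repacked_artifacts`` flag.

    from typing import Optional


    def choose_extra_args(kms_key: Optional[str], encrypt_by_default: bool) -> Optional[dict]:
        """Sketch of the S3 ExtraArgs selection for the sourcedir.tar.gz upload.

        Before the revert: with no customer KMS key, the tarball still got SSE-KMS
        with the AWS-managed S3 key unless encryption was explicitly disabled.
        After the revert: with no customer KMS key, no ExtraArgs are set.
        """
        if kms_key:
            return {"ServerSideEncryption": "aws:kms", "SSEKMSKeyId": kms_key}
        if encrypt_by_default:  # pre-revert behavior only
            return {"ServerSideEncryption": "aws:kms"}
        return None  # post-revert default when no KMS key is given


    # Pre-revert default (settings=None implied encrypt_artifact=True):
    assert choose_extra_args(None, True) == {"ServerSideEncryption": "aws:kms"}
    # Post-revert default:
    assert choose_extra_args(None, False) is None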

src/sagemaker/image_uri_config/pytorch.json

Lines changed: 2 additions & 70 deletions
@@ -63,8 +63,7 @@
             "1.6": "1.6.0",
             "1.7": "1.7.1",
             "1.8": "1.8.1",
-            "1.9": "1.9.1",
-            "1.10": "1.10.0"
+            "1.9": "1.9.1"
         },
         "versions": {
             "0.4.0": {
@@ -501,39 +500,6 @@
                     "us-west-2": "763104351884"
                 },
                 "repository": "pytorch-inference"
-            },
-            "1.10.0": {
-                "py_versions": [
-                    "py38"
-                ],
-                "registries": {
-                    "af-south-1": "626614931356",
-                    "ap-east-1": "871362719292",
-                    "ap-northeast-1": "763104351884",
-                    "ap-northeast-2": "763104351884",
-                    "ap-northeast-3": "364406365360",
-                    "ap-south-1": "763104351884",
-                    "ap-southeast-1": "763104351884",
-                    "ap-southeast-2": "763104351884",
-                    "ca-central-1": "763104351884",
-                    "cn-north-1": "727897471807",
-                    "cn-northwest-1": "727897471807",
-                    "eu-central-1": "763104351884",
-                    "eu-north-1": "763104351884",
-                    "eu-west-1": "763104351884",
-                    "eu-west-2": "763104351884",
-                    "eu-west-3": "763104351884",
-                    "eu-south-1": "692866216735",
-                    "me-south-1": "217643126080",
-                    "sa-east-1": "763104351884",
-                    "us-east-1": "763104351884",
-                    "us-east-2": "763104351884",
-                    "us-gov-west-1": "442386744353",
-                    "us-iso-east-1": "886529160074",
-                    "us-west-1": "763104351884",
-                    "us-west-2": "763104351884"
-                },
-                "repository": "pytorch-inference"
             }
         }
     },
@@ -553,8 +519,7 @@
             "1.6": "1.6.0",
             "1.7": "1.7.1",
             "1.8": "1.8.1",
-            "1.9": "1.9.1",
-            "1.10": "1.10.0"
+            "1.9": "1.9.1"
         },
         "versions": {
             "0.4.0": {
@@ -992,39 +957,6 @@
                     "us-west-2": "763104351884"
                 },
                 "repository": "pytorch-training"
-            },
-            "1.10.0": {
-                "py_versions": [
-                    "py38"
-                ],
-                "registries": {
-                    "af-south-1": "626614931356",
-                    "ap-east-1": "871362719292",
-                    "ap-northeast-1": "763104351884",
-                    "ap-northeast-2": "763104351884",
-                    "ap-northeast-3": "364406365360",
-                    "ap-south-1": "763104351884",
-                    "ap-southeast-1": "763104351884",
-                    "ap-southeast-2": "763104351884",
-                    "ca-central-1": "763104351884",
-                    "cn-north-1": "727897471807",
-                    "cn-northwest-1": "727897471807",
-                    "eu-central-1": "763104351884",
-                    "eu-north-1": "763104351884",
-                    "eu-west-1": "763104351884",
-                    "eu-west-2": "763104351884",
-                    "eu-west-3": "763104351884",
-                    "eu-south-1": "692866216735",
-                    "me-south-1": "217643126080",
-                    "sa-east-1": "763104351884",
-                    "us-east-1": "763104351884",
-                    "us-east-2": "763104351884",
-                    "us-gov-west-1": "442386744353",
-                    "us-iso-east-1": "886529160074",
-                    "us-west-1": "763104351884",
-                    "us-west-2": "763104351884"
-                },
-                "repository": "pytorch-training"
             }
         }
     }
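
These JSON tables feed ``sagemaker.image_uris.retrieve``, so after the revert PyTorch 1.10/1.10.0 no longer resolves from this config. A minimal sketch, with an arbitrary example region and instance type:

    from sagemaker import image_uris

    # Resolves against the "version_aliases"/"versions" tables shown above.
    uri = image_uris.retrieve(
        framework="pytorch",
        region="us-west-2",
        version="1.9.1",          # "1.10.0" would no longer resolve after this revert
        py_version="py38",
        instance_type="ml.m5.xlarge",
        image_scope="training",   # or "inference"
    )
    print(uri)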

src/sagemaker/lineage/query.py

Lines changed: 2 additions & 46 deletions
@@ -83,11 +83,10 @@ def __init__(
         self._session = sagemaker_session
 
     def to_lineage_object(self):
-        """Convert the ``Vertex`` object to its corresponding Artifact, Action, Context object."""
+        """Convert the ``Vertex`` object to its corresponding ``Artifact`` or ``Context`` object."""
         from sagemaker.lineage.artifact import Artifact, ModelArtifact
         from sagemaker.lineage.context import Context, EndpointContext
         from sagemaker.lineage.artifact import DatasetArtifact
-        from sagemaker.lineage.action import Action
 
         if self.lineage_entity == LineageEntityEnum.CONTEXT.value:
             resource_name = get_resource_name_from_arn(self.arn)
@@ -104,9 +103,6 @@ def to_lineage_object(self):
                 return DatasetArtifact.load(artifact_arn=self.arn, sagemaker_session=self._session)
             return Artifact.load(artifact_arn=self.arn, sagemaker_session=self._session)
 
-        if self.lineage_entity == LineageEntityEnum.ACTION.value:
-            return Action.load(action_name=self.arn.split("/")[1], sagemaker_session=self._session)
-
         raise ValueError("Vertex cannot be converted to a lineage object.")
 
 
@@ -212,44 +208,6 @@ def _convert_api_response(self, response) -> LineageQueryResult:
 
         return converted
 
-    def _collapse_cross_account_artifacts(self, query_response):
-        """Collapse the duplicate vertices and edges for cross-account."""
-        for edge in query_response.edges:
-            if (
-                "artifact" in edge.source_arn
-                and "artifact" in edge.destination_arn
-                and edge.source_arn.split("/")[1] == edge.destination_arn.split("/")[1]
-                and edge.source_arn != edge.destination_arn
-            ):
-                edge_source_arn = edge.source_arn
-                edge_destination_arn = edge.destination_arn
-                self._update_cross_account_edge(
-                    edges=query_response.edges,
-                    arn=edge_source_arn,
-                    duplicate_arn=edge_destination_arn,
-                )
-                self._update_cross_account_vertex(
-                    query_response=query_response, duplicate_arn=edge_destination_arn
-                )
-
-        # remove the duplicate edges from cross account
-        new_edge = [e for e in query_response.edges if not e.source_arn == e.destination_arn]
-        query_response.edges = new_edge
-
-        return query_response
-
-    def _update_cross_account_edge(self, edges, arn, duplicate_arn):
-        """Replace the duplicate arn with arn in edges list."""
-        for idx, e in enumerate(edges):
-            if e.destination_arn == duplicate_arn:
-                edges[idx].destination_arn = arn
-            elif e.source_arn == duplicate_arn:
-                edges[idx].source_arn = arn
-
-    def _update_cross_account_vertex(self, query_response, duplicate_arn):
-        """Remove the vertex with duplicate arn in the vertices list."""
-        query_response.vertices = [v for v in query_response.vertices if not v.arn == duplicate_arn]
-
     def query(
         self,
         start_arns: List[str],
@@ -277,7 +235,5 @@ def query(
             Filters=query_filter._to_request_dict() if query_filter else {},
             MaxDepth=max_depth,
         )
-        query_response = self._convert_api_response(query_response)
-        query_response = self._collapse_cross_account_artifacts(query_response)
 
-        return query_response
+        return self._convert_api_response(query_response)
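
For context, a minimal sketch of the ``LineageQuery.query`` call whose post-processing this hunk trims back to a single ``_convert_api_response`` pass; the ARN and session below are placeholders.

    import sagemaker
    from sagemaker.lineage.query import (
        LineageEntityEnum,
        LineageFilter,
        LineageQuery,
        LineageQueryDirectionEnum,
        LineageSourceEnum,
    )

    session = sagemaker.Session()

    # Placeholder ARN; any lineage entity ARN can serve as a start node.
    start_arn = "arn:aws:sagemaker:us-west-2:111122223333:context/example-endpoint-context"

    query_filter = LineageFilter(
        entities=[LineageEntityEnum.ARTIFACT],
        sources=[LineageSourceEnum.MODEL],
    )

    result = LineageQuery(session).query(
        start_arns=[start_arn],
        query_filter=query_filter,
        direction=LineageQueryDirectionEnum.DESCENDANTS,
        include_edges=True,
    )

    for vertex in result.vertices:
        # After the revert, cross-account duplicate vertices and edges are
        # returned as-is rather than being collapsed first.
        print(vertex.arn, vertex.lineage_source)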

src/sagemaker/model.py

Lines changed: 0 additions & 1 deletion
@@ -1131,7 +1131,6 @@ def _upload_code(self, key_prefix, repack=False):
                 script=self.entry_point,
                 directory=self.source_dir,
                 dependencies=self.dependencies,
-                settings=self.sagemaker_session.settings,
             )
 
         if repack and self.model_data is not None and self.entry_point is not None:

src/sagemaker/model_monitor/clarify_model_monitoring.py

Lines changed: 7 additions & 24 deletions
@@ -26,7 +26,7 @@
 from sagemaker import image_uris, s3
 from sagemaker.session import Session
 from sagemaker.utils import name_from_base
-from sagemaker.clarify import SageMakerClarifyProcessor, ModelPredictedLabelConfig
+from sagemaker.clarify import SageMakerClarifyProcessor
 
 _LOGGER = logging.getLogger(__name__)
 
@@ -833,10 +833,9 @@ def suggest_baseline(
                 specific explainability method. Currently, only SHAP is supported.
             model_config (:class:`~sagemaker.clarify.ModelConfig`): Config of the model and its
                 endpoint to be created.
-            model_scores (int or str or :class:`~sagemaker.clarify.ModelPredictedLabelConfig`):
-                Index or JSONPath to locate the predicted scores in the model output. This is not
-                required if the model output is a single score. Alternatively, it can be an instance
-                of ModelPredictedLabelConfig to provide more parameters like label_headers.
+            model_scores (int or str): Index or JSONPath location in the model output for the
+                predicted scores to be explained. This is not required if the model output is
+                a single score.
             wait (bool): Whether the call should wait until the job completes (default: False).
             logs (bool): Whether to show the logs produced by the job.
                 Only meaningful when wait is True (default: False).
@@ -866,24 +865,14 @@
         headers = copy.deepcopy(data_config.headers)
         if headers and data_config.label in headers:
             headers.remove(data_config.label)
-        if model_scores is None:
-            inference_attribute = None
-            label_headers = None
-        elif isinstance(model_scores, ModelPredictedLabelConfig):
-            inference_attribute = str(model_scores.label)
-            label_headers = model_scores.label_headers
-        else:
-            inference_attribute = str(model_scores)
-            label_headers = None
         self.latest_baselining_job_config = ClarifyBaseliningConfig(
             analysis_config=ExplainabilityAnalysisConfig(
                 explainability_config=explainability_config,
                 model_config=model_config,
                 headers=headers,
-                label_headers=label_headers,
             ),
             features_attribute=data_config.features,
-            inference_attribute=inference_attribute,
+            inference_attribute=model_scores if model_scores is None else str(model_scores),
         )
         self.latest_baselining_job_name = baselining_job_name
         self.latest_baselining_job = ClarifyBaseliningJob(
@@ -1177,7 +1166,7 @@ def attach(cls, monitor_schedule_name, sagemaker_session=None):
 class ExplainabilityAnalysisConfig:
     """Analysis configuration for ModelExplainabilityMonitor."""
 
-    def __init__(self, explainability_config, model_config, headers=None, label_headers=None):
+    def __init__(self, explainability_config, model_config, headers=None):
         """Creates an analysis config dictionary.
 
         Args:
@@ -1186,19 +1175,13 @@ def __init__(self, explainability_config, model_config, headers=None, label_headers=None):
             model_config (sagemaker.clarify.ModelConfig): Config object related to bias
                 configurations.
             headers (list[str]): A list of feature names (without label) of model/endpint input.
-            label_headers (list[str]): List of headers, each for a predicted score in model output.
-                It is used to beautify the analysis report by replacing placeholders like "label0".
-
         """
-        predictor_config = model_config.get_predictor_config()
         self.analysis_config = {
             "methods": explainability_config.get_explainability_config(),
-            "predictor": predictor_config,
+            "predictor": model_config.get_predictor_config(),
         }
         if headers is not None:
            self.analysis_config["headers"] = headers
-        if label_headers is not None:
-            predictor_config["label_headers"] = label_headers
 
     def _to_dict(self):
         """Generates a request dictionary using the parameters provided to the class."""
