Skip to content

Commit 89e6100

Browse files
Merge branch 'master' into master
2 parents 6274f5f + 345381e commit 89e6100

File tree

9 files changed

+84
-104
lines changed

9 files changed

+84
-104
lines changed

requirements/extras/test_requirements.txt

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,12 +23,7 @@ pyvis==0.2.1
2323
pandas>=1.3.5,<1.5
2424
scikit-learn==1.3.0
2525
cloudpickle==2.2.1
26-
scipy==1.10.1
27-
urllib3>=1.26.8,<3.0.0
28-
docker>=5.0.2,<7.0.0
2926
PyYAML==6.0
30-
pyspark==3.3.1
31-
sagemaker-feature-store-pyspark-3.3
3227
# TODO find workaround
3328
xgboost>=1.6.2,<=1.7.6
3429
pillow>=10.0.1,<=11
@@ -39,4 +34,3 @@ tritonclient[http]<2.37.0
3934
onnx==1.14.1
4035
# tf2onnx==1.15.1
4136
nbformat>=5.9,<6
42-
accelerate>=0.24.1,<=0.27.0

setup.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,8 @@ def read_requirements(filename):
8585
extras["all"] = [item for group in extras.values() for item in group]
8686
# Tests specific dependencies (do not need to be included in 'all')
8787
test_dependencies = read_requirements("requirements/extras/test_requirements.txt")
88+
# test dependencies are a superset of testing and extra dependencies
89+
test_dependencies.extend(extras["all"])
8890
# remove torch and torchvision if python version is not 3.10
8991
if sys.version_info.minor != 10:
9092
test_dependencies = [

src/sagemaker/serve/builder/model_builder.py

Lines changed: 13 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020

2121
from pathlib import Path
2222

23-
from accelerate.commands.estimate import estimate_command_parser, gather_data
2423
from sagemaker import Session
2524
from sagemaker.model import Model
2625
from sagemaker.base_predictor import PredictorBase
@@ -43,7 +42,11 @@
4342
from sagemaker.serve.utils import task
4443
from sagemaker.serve.utils.exceptions import TaskNotFoundException
4544
from sagemaker.serve.utils.predictors import _get_local_mode_predictor
46-
from sagemaker.serve.utils.hardware_detector import _get_gpu_info, _get_gpu_info_fallback
45+
from sagemaker.serve.utils.hardware_detector import (
46+
_get_gpu_info,
47+
_get_gpu_info_fallback,
48+
_total_inference_model_size_mib,
49+
)
4750
from sagemaker.serve.detector.image_detector import (
4851
auto_detect_container,
4952
_detect_framework_and_version,
@@ -70,11 +73,8 @@
7073
ModelServer.DJL_SERVING,
7174
}
7275

73-
MIB_CONVERSION_FACTOR = 0.00000095367431640625
74-
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
75-
7676

77-
# pylint: disable=attribute-defined-outside-init
77+
# pylint: disable=attribute-defined-outside-init, disable=E1101
7878
@dataclass
7979
class ModelBuilder(Triton, DJL, JumpStart, TGI, Transformers):
8080
"""Class that builds a deployable model.
@@ -719,39 +719,22 @@ def _schema_builder_init(self, model_task: str):
719719
except ValueError:
720720
raise TaskNotFoundException(f"Schema builder for {model_task} could not be found.")
721721

722-
def _total_inference_model_size_mib(self):
723-
"""Calculates the model size from HF accelerate
724-
725-
This function gets the model size from accelerate. It also adds a
726-
padding and converts to size MiB. When performing inference, expect
727-
to add up to an additional 20% to the given model size as found by EleutherAI.
728-
"""
729-
dtypes = self.env_vars.get("dtypes", "float32")
730-
parser = estimate_command_parser()
731-
args = parser.parse_args([self.model, "--dtypes", dtypes])
732-
733-
output = gather_data(
734-
args
735-
) # "dtype", "Largest Layer", "Total Size Bytes", "Training using Adam"
736-
737-
if output is None:
738-
raise ValueError(f"Could not get Model size for {self.model}")
739-
740-
total_memory_size_mib = MEMORY_BUFFER_MULTIPLIER * output[0][2] * MIB_CONVERSION_FACTOR
741-
logger.info("Total memory size MIB: %s", total_memory_size_mib)
742-
return total_memory_size_mib
743-
744722
def _can_fit_on_single_gpu(self) -> Type[bool]:
745723
"""Check if model can fit on a single GPU
746724
747725
If the size of the model is <= single gpu memory size, returns True else False
748726
"""
749727
try:
750728
single_gpu_size_mib = self._try_fetch_gpu_info()
751-
if self._total_inference_model_size_mib() <= single_gpu_size_mib:
729+
if (
730+
_total_inference_model_size_mib(self.model, self.env_vars.get("dtypes", "float32"))
731+
<= single_gpu_size_mib
732+
):
752733
logger.info(
753734
"Total inference model size MIB %s, single GPU size for instance MIB %s",
754-
self._total_inference_model_size_mib(),
735+
_total_inference_model_size_mib(
736+
self.model, self.env_vars.get("dtypes", "float32")
737+
),
755738
single_gpu_size_mib,
756739
)
757740
return True

src/sagemaker/serve/builder/schema_builder.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -208,12 +208,18 @@ def _get_inverse(self, obj):
208208

209209
def __repr__(self):
210210
"""Placeholder docstring"""
211+
if hasattr(self, "input_serializer") and hasattr(self, "output_serializer"):
212+
return (
213+
f"SchemaBuilder(\n"
214+
f"input_serializer={self.input_serializer}\n"
215+
f"output_serializer={self.output_serializer}\n"
216+
f"input_deserializer={self.input_deserializer._deserializer}\n"
217+
f"output_deserializer={self.output_deserializer._deserializer})"
218+
)
211219
return (
212220
f"SchemaBuilder(\n"
213-
f"input_serializer={self.input_serializer}\n"
214-
f"output_serializer={self.output_serializer}\n"
215-
f"input_deserializer={self.input_deserializer._deserializer}\n"
216-
f"output_deserializer={self.output_deserializer._deserializer})"
221+
f"custom_input_translator={self.custom_input_translator}\n"
222+
f"custom_output_translator={self.custom_output_translator}\n"
217223
)
218224

219225
def generate_marshalling_map(self) -> dict:

src/sagemaker/serve/utils/hardware_detector.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,12 +18,18 @@
1818

1919
from botocore.exceptions import ClientError
2020

21+
from accelerate.commands.estimate import estimate_command_parser, gather_data
2122
from sagemaker import Session
23+
from sagemaker.model import Model
2224
from sagemaker import instance_types_gpu_info
2325

2426
logger = logging.getLogger(__name__)
2527

2628

29+
MIB_CONVERSION_FACTOR = 0.00000095367431640625
30+
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
31+
32+
2733
def _get_gpu_info(instance_type: str, session: Session) -> Tuple[int, int]:
2834
"""Get GPU info for the provided instance
2935
@@ -108,3 +114,24 @@ def _format_instance_type(instance_type: str) -> str:
108114

109115
ec2_instance = ".".join(split_instance)
110116
return ec2_instance
117+
118+
119+
def _total_inference_model_size_mib(model: Model, dtype: str) -> int:
120+
"""Calculates the model size from HF accelerate
121+
122+
This function gets the model size from accelerate. It also adds a
123+
padding and converts to size MiB. When performing inference, expect
124+
to add up to an additional 20% to the given model size as found by EleutherAI.
125+
"""
126+
args = estimate_command_parser().parse_args([model, "--dtypes", dtype])
127+
128+
output = gather_data(
129+
args
130+
) # "dtype", "Largest Layer", "Total Size Bytes", "Training using Adam"
131+
132+
if output is None:
133+
raise ValueError(f"Could not get Model size for {model}")
134+
135+
total_memory_size_mib = MEMORY_BUFFER_MULTIPLIER * output[0][2] * MIB_CONVERSION_FACTOR
136+
logger.info("Total memory size MIB: %s", total_memory_size_mib)
137+
return total_memory_size_mib

tests/integ/sagemaker/serve/test_serve_pt_happy.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,6 @@ def model_builder(request):
181181
# ), f"{caught_ex} was thrown when running pytorch squeezenet local container test"
182182

183183

184-
@pytest.mark.skip(reason="Failing test. Fix is pending.")
185184
@pytest.mark.skipif(
186185
PYTHON_VERSION_IS_NOT_310, # or NOT_RUNNING_ON_INF_EXP_DEV_PIPELINE,
187186
reason="The goal of these test are to test the serving components of our feature",
@@ -222,8 +221,10 @@ def test_happy_pytorch_sagemaker_endpoint(
222221
)
223222
if caught_ex:
224223
logger.exception(caught_ex)
224+
ignore_if_worker_dies = "Worker died." in str(caught_ex)
225+
# https://github.com/pytorch/serve/issues/3032
225226
assert (
226-
False
227+
ignore_if_worker_dies
227228
), f"{caught_ex} was thrown when running pytorch squeezenet sagemaker endpoint test"
228229

229230

tests/integ/test_feature_store.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1536,6 +1536,7 @@ def test_feature_metadata(
15361536
] == feature_group.list_parameters_for_feature_metadata(feature_name=feature_name)
15371537

15381538

1539+
@pytest.mark.skip(reason="Failing test. Fix is pending.")
15391540
def test_search(feature_store_session, role, feature_group_name, pandas_data_frame):
15401541
feature_store = FeatureStore(sagemaker_session=feature_store_session)
15411542
feature_group = FeatureGroup(name=feature_group_name, sagemaker_session=feature_store_session)

tests/unit/sagemaker/serve/builder/test_model_builder.py

Lines changed: 4 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -53,9 +53,6 @@
5353
ModelServer.DJL_SERVING,
5454
}
5555

56-
MIB_CONVERSION_FACTOR = 0.00000095367431640625
57-
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
58-
5956
mock_session = MagicMock()
6057

6158

@@ -1205,7 +1202,7 @@ def test_build_for_transformers_happy_case(
12051202

12061203
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers")
12071204
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._try_fetch_gpu_info")
1208-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1205+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
12091206
@patch("sagemaker.image_uris.retrieve")
12101207
@patch("sagemaker.djl_inference.model.urllib")
12111208
@patch("sagemaker.djl_inference.model.json")
@@ -1248,7 +1245,7 @@ def test_build_for_transformers_happy_case_with_values(
12481245

12491246
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_djl", Mock())
12501247
@patch("sagemaker.serve.builder.model_builder._get_gpu_info")
1251-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1248+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
12521249
@patch("sagemaker.image_uris.retrieve")
12531250
@patch("sagemaker.djl_inference.model.urllib")
12541251
@patch("sagemaker.djl_inference.model.json")
@@ -1293,7 +1290,7 @@ def test_build_for_transformers_happy_case_with_valid_gpu_info(
12931290
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers", Mock())
12941291
@patch("sagemaker.serve.builder.model_builder._get_gpu_info")
12951292
@patch("sagemaker.serve.builder.model_builder._get_gpu_info_fallback")
1296-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1293+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
12971294
@patch("sagemaker.image_uris.retrieve")
12981295
@patch("sagemaker.djl_inference.model.urllib")
12991296
@patch("sagemaker.djl_inference.model.json")
@@ -1342,61 +1339,6 @@ def test_build_for_transformers_happy_case_with_valid_gpu_fallback(
13421339
)
13431340
self.assertEqual(model_builder._can_fit_on_single_gpu(), True)
13441341

1345-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers", Mock())
1346-
@patch("sagemaker.serve.builder.model_builder.estimate_command_parser")
1347-
@patch("sagemaker.serve.builder.model_builder.gather_data")
1348-
@patch("sagemaker.image_uris.retrieve")
1349-
@patch("sagemaker.djl_inference.model.urllib")
1350-
@patch("sagemaker.djl_inference.model.json")
1351-
@patch("sagemaker.huggingface.llm_utils.urllib")
1352-
@patch("sagemaker.huggingface.llm_utils.json")
1353-
@patch("sagemaker.model_uris.retrieve")
1354-
@patch("sagemaker.serve.builder.model_builder._ServeSettings")
1355-
def test_build_for_transformers_happy_case_hugging_face_responses(
1356-
self,
1357-
mock_serveSettings,
1358-
mock_model_uris_retrieve,
1359-
mock_llm_utils_json,
1360-
mock_llm_utils_urllib,
1361-
mock_model_json,
1362-
mock_model_urllib,
1363-
mock_image_uris_retrieve,
1364-
mock_gather_data,
1365-
mock_parser,
1366-
):
1367-
mock_setting_object = mock_serveSettings.return_value
1368-
mock_setting_object.role_arn = mock_role_arn
1369-
mock_setting_object.s3_model_data_url = mock_s3_model_data_url
1370-
1371-
mock_model_uris_retrieve.side_effect = KeyError
1372-
mock_llm_utils_json.load.return_value = {"pipeline_tag": "text-classification"}
1373-
mock_llm_utils_urllib.request.Request.side_effect = Mock()
1374-
1375-
mock_model_json.load.return_value = {"some": "config"}
1376-
mock_model_urllib.request.Request.side_effect = Mock()
1377-
mock_image_uris_retrieve.return_value = "https://some-image-uri"
1378-
1379-
mock_parser.return_value = Mock()
1380-
mock_gather_data.return_value = [[1, 1, 1, 1]]
1381-
product = MIB_CONVERSION_FACTOR * 1 * MEMORY_BUFFER_MULTIPLIER
1382-
1383-
model_builder = ModelBuilder(
1384-
model="stable-diffusion",
1385-
sagemaker_session=mock_session,
1386-
instance_type=mock_instance_type,
1387-
)
1388-
self.assertEqual(model_builder._total_inference_model_size_mib(), product)
1389-
1390-
mock_parser.return_value = Mock()
1391-
mock_gather_data.return_value = None
1392-
model_builder = ModelBuilder(
1393-
model="stable-diffusion",
1394-
sagemaker_session=mock_session,
1395-
instance_type=mock_instance_type,
1396-
)
1397-
with self.assertRaises(ValueError) as _:
1398-
model_builder._total_inference_model_size_mib()
1399-
14001342
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_djl")
14011343
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._can_fit_on_single_gpu")
14021344
@patch("sagemaker.image_uris.retrieve")
@@ -1556,7 +1498,7 @@ def test_try_fetch_gpu_info_throws(
15561498
self.assertEqual(model_builder._can_fit_on_single_gpu(), False)
15571499

15581500
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers", Mock())
1559-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1501+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
15601502
@patch("sagemaker.image_uris.retrieve")
15611503
@patch("sagemaker.djl_inference.model.urllib")
15621504
@patch("sagemaker.djl_inference.model.json")

tests/unit/sagemaker/serve/utils/test_hardware_detector.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
from __future__ import absolute_import
1414

1515
from botocore.exceptions import ClientError
16+
from unittest.mock import patch, Mock
1617
import pytest
1718

1819
from sagemaker.serve.utils import hardware_detector
@@ -21,6 +22,8 @@
2122
VALID_INSTANCE_TYPE = "ml.g5.48xlarge"
2223
INVALID_INSTANCE_TYPE = "fl.c5.57xxlarge"
2324
EXPECTED_INSTANCE_GPU_INFO = (8, 196608)
25+
MIB_CONVERSION_FACTOR = 0.00000095367431640625
26+
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
2427

2528

2629
def test_get_gpu_info_success(sagemaker_session, boto_session):
@@ -96,3 +99,24 @@ def test_format_instance_type_without_ml_success():
9699
formatted_instance_type = hardware_detector._format_instance_type("g5.48xlarge")
97100

98101
assert formatted_instance_type == "g5.48xlarge"
102+
103+
104+
@patch("sagemaker.serve.utils.hardware_detector.estimate_command_parser")
105+
@patch("sagemaker.serve.utils.hardware_detector.gather_data")
106+
def test_total_inference_model_size_mib(
107+
mock_gather_data,
108+
mock_parser,
109+
):
110+
mock_parser.return_value = Mock()
111+
mock_gather_data.return_value = [[1, 1, 1, 1]]
112+
product = MIB_CONVERSION_FACTOR * 1 * MEMORY_BUFFER_MULTIPLIER
113+
114+
assert (
115+
hardware_detector._total_inference_model_size_mib("stable-diffusion", "float32") == product
116+
)
117+
118+
mock_parser.return_value = Mock()
119+
mock_gather_data.return_value = None
120+
121+
with pytest.raises(ValueError):
122+
hardware_detector._total_inference_model_size_mib("stable-diffusion", "float32")

0 commit comments

Comments
 (0)