
Commit eacc07f

samples: migrate vision automl samples (#71)
1 parent d7d5fdd commit eacc07f

File tree

9 files changed: +457 -0 lines changed
Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
#!/usr/bin/env python

# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import os

from google.cloud import automl_v1beta1 as automl
import pytest

project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
compute_region = "us-central1"


@pytest.mark.skip(reason="creates too many models")
def test_model_create_status_delete(capsys):
    # create model
    client = automl.AutoMlClient()
    model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    project_location = client.location_path(project_id, compute_region)
    my_model = {
        "display_name": model_name,
        "dataset_id": "3946265060617537378",
        "image_classification_model_metadata": {"train_budget": 24},
    }
    response = client.create_model(project_location, my_model)
    operation_name = response.operation.name
    assert operation_name

    # cancel operation
    response.cancel()
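For reference, the long-running operation returned by `create_model` can also be waited on rather than cancelled. A minimal sketch, assuming the same `google-cloud-automl` v1beta1 client used in the test above (not part of this commit):

```python
# Sketch only: wait for training to finish instead of cancelling.
# `response` is the operation returned by client.create_model() above.
model = response.result()  # blocks until the operation completes
print("Trained model: {}".format(model.name))
```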

automl/beta/automl_vision_model.py

Lines changed: 93 additions & 0 deletions
@@ -0,0 +1,93 @@
#!/usr/bin/env python

# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This application demonstrates how to perform basic operations on models
with the Google AutoML Vision API.

For more information, see the documentation at
https://cloud.google.com/vision/automl/docs.
"""

import argparse
import os


def create_model(
    project_id, compute_region, dataset_id, model_name, train_budget=24
):
    """Create a model."""
    # [START automl_vision_create_model]
    # TODO(developer): Uncomment and set the following variables
    # project_id = 'PROJECT_ID_HERE'
    # compute_region = 'COMPUTE_REGION_HERE'
    # dataset_id = 'DATASET_ID_HERE'
    # model_name = 'MODEL_NAME_HERE'
    # train_budget = integer amount for maximum cost of model

    from google.cloud import automl_v1beta1 as automl

    client = automl.AutoMlClient()

    # A resource that represents a Google Cloud Platform location.
    project_location = client.location_path(project_id, compute_region)

    # Set the model name and model metadata for the image dataset.
    my_model = {
        "display_name": model_name,
        "dataset_id": dataset_id,
        "image_classification_model_metadata": {"train_budget": train_budget}
        if train_budget
        else {},
    }

    # Create a model with the model metadata in the region.
    response = client.create_model(project_location, my_model)

    print("Training operation name: {}".format(response.operation.name))
    print("Training started...")

    # [END automl_vision_create_model]


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    subparsers = parser.add_subparsers(dest="command")

    create_model_parser = subparsers.add_parser(
        "create_model", help=create_model.__doc__
    )
    create_model_parser.add_argument("dataset_id")
    create_model_parser.add_argument("model_name")
    create_model_parser.add_argument(
        "train_budget", type=int, nargs="?", default=0
    )

    project_id = os.environ["PROJECT_ID"]
    compute_region = os.environ["REGION_NAME"]

    args = parser.parse_args()

    if args.command == "create_model":
        create_model(
            project_id,
            compute_region,
            args.dataset_id,
            args.model_name,
            args.train_budget,
        )
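As a usage note (not part of this commit), here is a minimal sketch of calling the `create_model` helper from `automl/beta/automl_vision_model.py` directly; every argument value below is a hypothetical placeholder:

```python
# Sketch only: invoke the sample's create_model() helper from Python.
# The project, dataset ID, and model name are placeholders for illustration.
from automl_vision_model import create_model

create_model(
    project_id="my-gcp-project",
    compute_region="us-central1",
    dataset_id="1234567890123456789",
    model_name="flowers_model",
    train_budget=24,  # maximum training budget, as in the sample
)
```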
Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG TF_SERVING_IMAGE_TAG
FROM tensorflow/serving:${TF_SERVING_IMAGE_TAG}

ENV GCS_READ_CACHE_MAX_STALENESS 300
ENV GCS_STAT_CACHE_MAX_AGE 300
ENV GCS_MATCHING_PATHS_CACHE_MAX_AGE 300

EXPOSE 8500
EXPOSE 8501
ENTRYPOINT /usr/bin/tensorflow_model_server \
    --port=8500 \
    --rest_api_port=8501 \
    --model_base_path=/tmp/mounted_model/ \
    --tensorflow_session_parallelism=0 \
    --file_system_poll_wait_seconds=31540000
Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
# AutoML Vision Edge Container Prediction

This is an example to show how to predict with AutoML Vision Edge Containers.
The test (automl_vision_edge_container_predict_test.py) shows an automated way
to run the prediction.

If you want to try the test manually with a sample model, first install
[gsutil tools](https://cloud.google.com/storage/docs/gsutil_install) and
[Docker CE](https://docs.docker.com/install/), and then follow the steps
below. All of the following commands assume you are in this folder and have
set these shell variables:

```bash
$ CONTAINER_NAME=AutomlVisionEdgeContainerPredict
$ PORT=8505
```

+ Step 1. Pull the Docker image.

```bash
# This is a CPU TFServing 1.14.0 image with some default settings, compiled from
# https://hub.docker.com/r/tensorflow/serving.
$ DOCKER_GCS_DIR=gcr.io/cloud-devrel-public-resources
$ CPU_DOCKER_GCS_PATH=${DOCKER_GCS_DIR}/gcloud-container-1.14.0:latest
$ sudo docker pull ${CPU_DOCKER_GCS_PATH}
```

+ Step 2. Get a sample saved model.

```bash
$ MODEL_GCS_DIR=gs://cloud-samples-data/vision/edge_container_predict
$ SAMPLE_SAVED_MODEL=${MODEL_GCS_DIR}/saved_model.pb
$ mkdir model_path
$ YOUR_MODEL_PATH=$(realpath model_path)
$ gsutil -m cp ${SAMPLE_SAVED_MODEL} ${YOUR_MODEL_PATH}
```

+ Step 3. Run the Docker container.

```bash
$ sudo docker run --rm --name ${CONTAINER_NAME} -p ${PORT}:8501 -v \
${YOUR_MODEL_PATH}:/tmp/mounted_model/0001 -t ${CPU_DOCKER_GCS_PATH}
```

+ Step 4. Send a prediction request.

```bash
$ python automl_vision_edge_container_predict.py --image_file_path=./test.jpg \
--image_key=1 --port_number=${PORT}
```

The output looks like:

```
{
    'predictions': [
        {
            'scores': [0.0914393, 0.458942, 0.027604, 0.386767, 0.0352474],
            'labels': ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'],
            'key': '1'
        }
    ]
}
```
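As an aside (not part of the README in this commit), a short Python sketch of picking the top-scoring label out of a response shaped like the example above; the `response` value is copied from that example:

```python
# Sketch only: select the highest-scoring label from the example response.
response = {
    'predictions': [
        {
            'scores': [0.0914393, 0.458942, 0.027604, 0.386767, 0.0352474],
            'labels': ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'],
            'key': '1',
        }
    ]
}

prediction = response['predictions'][0]
best = max(range(len(prediction['scores'])), key=lambda i: prediction['scores'][i])
print(prediction['key'], prediction['labels'][best], prediction['scores'][best])
# -> 1 dandelion 0.458942
```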
+ Step 5. Stop the container.

```bash
sudo docker stop ${CONTAINER_NAME}
```

Note: The Docker image is uploaded with the following command.

```bash
gcloud builds --project=cloud-devrel-public-resources \
    submit --config cloudbuild.yaml
```
Lines changed: 80 additions & 0 deletions
@@ -0,0 +1,80 @@
#!/usr/bin/env python

# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""This is an example of calling the REST API of a TFServing docker container.

Examples:
    python automl_vision_edge_container_predict.py \
        --image_file_path=./test.jpg --image_key=1 --port_number=8051

"""

import argparse
# [START automl_vision_edge_container_predict]
import base64
import io
import json

import requests


def container_predict(image_file_path, image_key, port_number=8501):
    """Sends a prediction request to the TFServing docker container REST API.

    Args:
        image_file_path: Path to a local image for the prediction request.
        image_key: Your chosen string key to identify the given image.
        port_number: The port number on your device to accept REST API calls.
    Returns:
        The response of the prediction request.
    """

    with io.open(image_file_path, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read()).decode('utf-8')

    # This example only shows prediction with one image. You can extend it
    # to predict with a batch of images indicated by different keys, so that
    # each response can be matched back to its corresponding input image.
    instances = {
        'instances': [
            {'image_bytes': {'b64': str(encoded_image)},
             'key': image_key}
        ]
    }

    # This example sends the request to the same machine that runs the
    # docker container. To send requests to another server, replace
    # localhost with that server's IP address.
    url = 'http://localhost:{}/v1/models/default:predict'.format(port_number)

    response = requests.post(url, data=json.dumps(instances))
    print(response.json())
    # [END automl_vision_edge_container_predict]
    return response.json()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_file_path', type=str)
    parser.add_argument('--image_key', type=str, default='1')
    parser.add_argument('--port_number', type=int, default=8501)
    args = parser.parse_args()

    container_predict(args.image_file_path, args.image_key, args.port_number)


if __name__ == '__main__':
    main()
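The in-code comment above mentions extending the request to a batch of keyed images. A minimal sketch of what that could look like, assuming the same local TF Serving container; `container_predict_batch` is a hypothetical helper, not part of this commit:

```python
# Sketch only: batch variant of the request body built in container_predict().
# Each image gets its own key so responses can be matched back to inputs.
import base64
import io
import json

import requests


def container_predict_batch(image_file_paths, port_number=8501):
    instances = []
    for key, path in enumerate(image_file_paths, start=1):
        with io.open(path, 'rb') as image_file:
            encoded = base64.b64encode(image_file.read()).decode('utf-8')
        instances.append({'image_bytes': {'b64': encoded}, 'key': str(key)})

    url = 'http://localhost:{}/v1/models/default:predict'.format(port_number)
    response = requests.post(url, data=json.dumps({'instances': instances}))
    return response.json()
```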
