
Commit 5828ce4

lint

1 parent 13401f8

18 files changed: +923 −755 lines

language/automl/automl_natural_language_dataset.py

Lines changed: 93 additions & 90 deletions
@@ -26,6 +26,7 @@
 import os
 
 from google.cloud import automl_v1beta1 as automl
+
 # [END automl_natural_language_import]
 
 
@@ -46,34 +47,34 @@ def create_dataset(project_id, compute_region, dataset_name, multilabel=False):
     project_location = client.location_path(project_id, compute_region)
 
     # Classification type is assigned based on multilabel value.
-    classification_type = 'MULTICLASS'
+    classification_type = "MULTICLASS"
     if multilabel:
-        classification_type = 'MULTILABEL'
+        classification_type = "MULTILABEL"
 
     # Specify the text classification type for the dataset.
-    dataset_metadata = {
-        'classification_type': classification_type
-    }
+    dataset_metadata = {"classification_type": classification_type}
 
     # Set dataset name and metadata.
     my_dataset = {
-        'display_name': dataset_name,
-        'text_classification_dataset_metadata': dataset_metadata
+        "display_name": dataset_name,
+        "text_classification_dataset_metadata": dataset_metadata,
     }
 
     # Create a dataset with the dataset metadata in the region.
     dataset = client.create_dataset(project_location, my_dataset)
 
     # Display the dataset information.
-    print('Dataset name: {}'.format(dataset.name))
-    print('Dataset id: {}'.format(dataset.name.split('/')[-1]))
-    print('Dataset display name: {}'.format(dataset.display_name))
-    print('Text classification dataset metadata:')
-    print('\t{}'.format(dataset.text_classification_dataset_metadata))
-    print('Dataset example count: {}'.format(dataset.example_count))
-    print('Dataset create time:')
-    print('\tseconds: {}'.format(dataset.create_time.seconds))
-    print('\tnanos: {}'.format(dataset.create_time.nanos))
+    print("Dataset name: {}".format(dataset.name))
+    print("Dataset id: {}".format(dataset.name.split("/")[-1]))
+    print("Dataset display name: {}".format(dataset.display_name))
+    print("Text classification dataset metadata:")
+    print("\t{}".format(dataset.text_classification_dataset_metadata))
+    print("Dataset example count: {}".format(dataset.example_count))
+    print("Dataset create time:")
+    print("\tseconds: {}".format(dataset.create_time.seconds))
+    print("\tnanos: {}".format(dataset.create_time.nanos))
+
+
 # [END automl_natural_language_create_dataset]
 
 
@@ -93,18 +94,20 @@ def list_datasets(project_id, compute_region, filter_):
     # List all the datasets available in the region by applying filter.
     response = client.list_datasets(project_location, filter_)
 
-    print('List of datasets:')
+    print("List of datasets:")
     for dataset in response:
         # Display the dataset information.
-        print('Dataset name: {}'.format(dataset.name))
-        print('Dataset id: {}'.format(dataset.name.split('/')[-1]))
-        print('Dataset display name: {}'.format(dataset.display_name))
-        print('Text classification dataset metadata:')
-        print('\t{}'.format(dataset.text_classification_dataset_metadata))
-        print('Dataset example count: {}'.format(dataset.example_count))
-        print('Dataset create time:')
-        print('\tseconds: {}'.format(dataset.create_time.seconds))
-        print('\tnanos: {}'.format(dataset.create_time.nanos))
+        print("Dataset name: {}".format(dataset.name))
+        print("Dataset id: {}".format(dataset.name.split("/")[-1]))
+        print("Dataset display name: {}".format(dataset.display_name))
+        print("Text classification dataset metadata:")
+        print("\t{}".format(dataset.text_classification_dataset_metadata))
+        print("Dataset example count: {}".format(dataset.example_count))
+        print("Dataset create time:")
+        print("\tseconds: {}".format(dataset.create_time.seconds))
+        print("\tnanos: {}".format(dataset.create_time.nanos))
+
+
 # [END automl_natural_language_list_datasets]
 
 
@@ -119,22 +122,23 @@ def get_dataset(project_id, compute_region, dataset_id):
     client = automl.AutoMlClient()
 
     # Get the full path of the dataset
-    dataset_full_id = client.dataset_path(
-        project_id, compute_region, dataset_id)
+    dataset_full_id = client.dataset_path(project_id, compute_region, dataset_id)
 
     # Get complete detail of the dataset.
     dataset = client.get_dataset(dataset_full_id)
 
     # Display the dataset information.
-    print('Dataset name: {}'.format(dataset.name))
-    print('Dataset id: {}'.format(dataset.name.split('/')[-1]))
-    print('Dataset display name: {}'.format(dataset.display_name))
-    print('Text classification dataset metadata:')
-    print('\t{}'.format(dataset.text_classification_dataset_metadata))
-    print('Dataset example count: {}'.format(dataset.example_count))
-    print('Dataset create time:')
-    print('\tseconds: {}'.format(dataset.create_time.seconds))
-    print('\tnanos: {}'.format(dataset.create_time.nanos))
+    print("Dataset name: {}".format(dataset.name))
+    print("Dataset id: {}".format(dataset.name.split("/")[-1]))
+    print("Dataset display name: {}".format(dataset.display_name))
+    print("Text classification dataset metadata:")
+    print("\t{}".format(dataset.text_classification_dataset_metadata))
+    print("Dataset example count: {}".format(dataset.example_count))
+    print("Dataset create time:")
+    print("\tseconds: {}".format(dataset.create_time.seconds))
+    print("\tnanos: {}".format(dataset.create_time.nanos))
+
+
 # [END automl_natural_language_get_dataset]
 
 
@@ -152,21 +156,20 @@ def import_data(project_id, compute_region, dataset_id, path):
     client = automl.AutoMlClient()
 
     # Get the full path of the dataset.
-    dataset_full_id = client.dataset_path(
-        project_id, compute_region, dataset_id)
+    dataset_full_id = client.dataset_path(project_id, compute_region, dataset_id)
 
     # Get the multiple Google Cloud Storage URIs.
-    input_uris = path.split(',')
-    input_config = {'gcs_source': {
-        'input_uris': input_uris
-    }}
+    input_uris = path.split(",")
+    input_config = {"gcs_source": {"input_uris": input_uris}}
 
     # Import the dataset from the input URI.
     response = client.import_data(dataset_full_id, input_config)
 
-    print('Processing import...')
+    print("Processing import...")
     # synchronous check of operation status.
-    print('Data imported. {}'.format(response.result()))
+    print("Data imported. {}".format(response.result()))
+
+
 # [END automl_natural_language_import_data]
 
 
@@ -182,20 +185,19 @@ def export_data(project_id, compute_region, dataset_id, output_uri):
     client = automl.AutoMlClient()
 
     # Get the full path of the dataset.
-    dataset_full_id = client.dataset_path(
-        project_id, compute_region, dataset_id)
+    dataset_full_id = client.dataset_path(project_id, compute_region, dataset_id)
 
     # Set the output URI
-    output_config = {'gcs_destination': {
-        'output_uri_prefix': output_uri
-    }}
+    output_config = {"gcs_destination": {"output_uri_prefix": output_uri}}
 
     # Export the data to the output URI.
     response = client.export_data(dataset_full_id, output_config)
 
-    print('Processing export...')
+    print("Processing export...")
     # synchronous check of operation status.
-    print('Data exported. {}'.format(response.result()))
+    print("Data exported. {}".format(response.result()))
+
+
 # [END automl_natural_language_export_data]
 
 
@@ -210,69 +212,70 @@ def delete_dataset(project_id, compute_region, dataset_id):
     client = automl.AutoMlClient()
 
     # Get the full path of the dataset.
-    dataset_full_id = client.dataset_path(
-        project_id, compute_region, dataset_id)
+    dataset_full_id = client.dataset_path(project_id, compute_region, dataset_id)
 
     # Delete a dataset.
     response = client.delete_dataset(dataset_full_id)
 
     # synchronous check of operation status.
-    print('Dataset deleted. {}'.format(response.result()))
+    print("Dataset deleted. {}".format(response.result()))
+
+
 # [END automl_natural_language_delete_dataset]
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class=argparse.RawDescriptionHelpFormatter)
-    subparsers = parser.add_subparsers(dest='command')
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    subparsers = parser.add_subparsers(dest="command")
 
     create_dataset_parser = subparsers.add_parser(
-        'create_dataset', help=create_dataset.__doc__)
-    create_dataset_parser.add_argument('dataset_name')
+        "create_dataset", help=create_dataset.__doc__
+    )
+    create_dataset_parser.add_argument("dataset_name")
     create_dataset_parser.add_argument(
-        'multilabel', nargs='?', choices=['False', 'True'], default='False')
+        "multilabel", nargs="?", choices=["False", "True"], default="False"
+    )
 
     list_datasets_parser = subparsers.add_parser(
-        'list_datasets', help=list_datasets.__doc__)
+        "list_datasets", help=list_datasets.__doc__
+    )
     list_datasets_parser.add_argument(
-        'filter_', nargs='?', default='text_classification_dataset_metadata:*')
+        "filter_", nargs="?", default="text_classification_dataset_metadata:*"
+    )
 
-    get_dataset_parser = subparsers.add_parser(
-        'get_dataset', help=get_dataset.__doc__)
-    get_dataset_parser.add_argument('dataset_id')
+    get_dataset_parser = subparsers.add_parser("get_dataset", help=get_dataset.__doc__)
+    get_dataset_parser.add_argument("dataset_id")
 
-    import_data_parser = subparsers.add_parser(
-        'import_data', help=import_data.__doc__)
-    import_data_parser.add_argument('dataset_id')
-    import_data_parser.add_argument('path')
+    import_data_parser = subparsers.add_parser("import_data", help=import_data.__doc__)
+    import_data_parser.add_argument("dataset_id")
+    import_data_parser.add_argument("path")
 
-    export_data_parser = subparsers.add_parser(
-        'export_data', help=export_data.__doc__)
-    export_data_parser.add_argument('dataset_id')
-    export_data_parser.add_argument('output_uri')
+    export_data_parser = subparsers.add_parser("export_data", help=export_data.__doc__)
+    export_data_parser.add_argument("dataset_id")
+    export_data_parser.add_argument("output_uri")
 
     delete_dataset_parser = subparsers.add_parser(
-        'delete_dataset', help=delete_dataset.__doc__)
-    delete_dataset_parser.add_argument('dataset_id')
+        "delete_dataset", help=delete_dataset.__doc__
+    )
+    delete_dataset_parser.add_argument("dataset_id")
 
-    project_id = os.environ['PROJECT_ID']
-    compute_region = os.environ['REGION_NAME']
+    project_id = os.environ["PROJECT_ID"]
+    compute_region = os.environ["REGION_NAME"]
 
     args = parser.parse_args()
 
-    if args.command == 'create_dataset':
-        multilabel = True if args.multilabel == 'True' else False
-        create_dataset(
-            project_id, compute_region, args.dataset_name, multilabel)
-    if args.command == 'list_datasets':
+    if args.command == "create_dataset":
+        multilabel = True if args.multilabel == "True" else False
+        create_dataset(project_id, compute_region, args.dataset_name, multilabel)
+    if args.command == "list_datasets":
         list_datasets(project_id, compute_region, args.filter_)
-    if args.command == 'get_dataset':
+    if args.command == "get_dataset":
         get_dataset(project_id, compute_region, args.dataset_id)
-    if args.command == 'import_data':
+    if args.command == "import_data":
         import_data(project_id, compute_region, args.dataset_id, args.path)
-    if args.command == 'export_data':
-        export_data(
-            project_id, compute_region, args.dataset_id, args.output_uri)
-    if args.command == 'delete_dataset':
+    if args.command == "export_data":
+        export_data(project_id, compute_region, args.dataset_id, args.output_uri)
+    if args.command == "delete_dataset":
        delete_dataset(project_id, compute_region, args.dataset_id)
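
The reformatting does not change behavior, so the sample is still driven the same way. As a usage illustration, here is a minimal sketch of calling the module's helpers directly from Python instead of through its CLI; the project ID, region, bucket, and dataset ID below are placeholders, and the sketch assumes Google Cloud credentials are configured for the automl_v1beta1 client the file uses.

    import os

    # Placeholders only; the sample's __main__ block reads these same
    # environment variables (os.environ["PROJECT_ID"], os.environ["REGION_NAME"]).
    os.environ["PROJECT_ID"] = "my-project"
    os.environ["REGION_NAME"] = "us-central1"

    # Importing is safe: the argparse CLI above runs only under __main__.
    from automl_natural_language_dataset import create_dataset, import_data

    project_id = os.environ["PROJECT_ID"]
    compute_region = os.environ["REGION_NAME"]

    # Create a multilabel text-classification dataset, then import labeled
    # examples. import_data splits its path argument on commas, so several
    # GCS CSV URIs can be passed at once (bucket and dataset ID are made up).
    create_dataset(project_id, compute_region, "my_dataset", multilabel=True)
    import_data(
        project_id,
        compute_region,
        "TCN0123456789012345678",
        "gs://my-bucket/csv/train.csv,gs://my-bucket/csv/test.csv",
    )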
