package com.example.video;

import com.google.api.gax.longrunning.OperationFuture;
- import com.google.cloud.videointelligence.v1beta2.AnnotateVideoProgress;
- import com.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest;
- import com.google.cloud.videointelligence.v1beta2.AnnotateVideoResponse;
- import com.google.cloud.videointelligence.v1beta2.Entity;
- import com.google.cloud.videointelligence.v1beta2.ExplicitContentFrame;
- import com.google.cloud.videointelligence.v1beta2.FaceAnnotation;
- import com.google.cloud.videointelligence.v1beta2.FaceFrame;
- import com.google.cloud.videointelligence.v1beta2.FaceSegment;
- import com.google.cloud.videointelligence.v1beta2.Feature;
- import com.google.cloud.videointelligence.v1beta2.LabelAnnotation;
- import com.google.cloud.videointelligence.v1beta2.LabelDetectionConfig;
- import com.google.cloud.videointelligence.v1beta2.LabelDetectionMode;
- import com.google.cloud.videointelligence.v1beta2.LabelSegment;
- import com.google.cloud.videointelligence.v1beta2.NormalizedBoundingBox;
- import com.google.cloud.videointelligence.v1beta2.VideoAnnotationResults;
- import com.google.cloud.videointelligence.v1beta2.VideoContext;
- import com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient;
- import com.google.cloud.videointelligence.v1beta2.VideoSegment;
- import com.google.longrunning.Operation;
+ import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+ import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+ import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+ import com.google.cloud.videointelligence.v1.Entity;
+ import com.google.cloud.videointelligence.v1.ExplicitContentFrame;
+ import com.google.cloud.videointelligence.v1.FaceAnnotation;
+ import com.google.cloud.videointelligence.v1.FaceFrame;
+ import com.google.cloud.videointelligence.v1.FaceSegment;
+ import com.google.cloud.videointelligence.v1.Feature;
+ import com.google.cloud.videointelligence.v1.LabelAnnotation;
+ import com.google.cloud.videointelligence.v1.LabelSegment;
+ import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
+ import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+ import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+ import com.google.cloud.videointelligence.v1.VideoSegment;
import com.google.protobuf.ByteString;
import java.io.IOException;
import java.nio.file.Files;
@@ -105,17 +101,18 @@ public static void argsHelper(String[] args) throws Exception {
   */
  public static void analyzeFaces(String gcsUri) throws Exception {
    // [START detect_faces]
-     // Instantiate a com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient
+     // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
          .setInputUri(gcsUri)
          .addFeatures(Feature.FACE_DETECTION)
          .build();

      // asynchronously perform facial analysis on videos
-       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response
-           = client.annotateVideoAsync(request);
+       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+           client.annotateVideoAsync(request);

+       System.out.println("Waiting for operation to complete...");
      boolean faceFound = false;
      for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
        int faceCount = 0;
@@ -166,19 +163,19 @@ public static void analyzeFaces(String gcsUri) throws Exception {
   */
  public static void analyzeLabels(String gcsUri) throws Exception {
    // [START detect_labels_gcs]
-     // Instantiate a com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient
+     // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      // Provide path to file hosted on GCS as "gs://bucket-name/..."
      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
          .setInputUri(gcsUri)
          .addFeatures(Feature.LABEL_DETECTION)
          .build();
      // Create an operation that will contain the response when the operation completes.
-       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> operation =
+       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
          client.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
-       for (VideoAnnotationResults results : operation.get().getAnnotationResultsList()) {
+       for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
        // process video / segment level label annotations
        System.out.println("Locations: ");
        for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
@@ -248,7 +245,7 @@ public static void analyzeLabels(String gcsUri) throws Exception {
   */
  public static void analyzeLabelsFile(String filePath) throws Exception {
    // [START detect_labels_file]
-     // Instantiate a com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient
+     // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      // Read file and encode into Base64
      Path path = Paths.get(filePath);
@@ -261,11 +258,11 @@ public static void analyzeLabelsFile(String filePath) throws Exception {
          .build();

      // Create an operation that will contain the response when the operation completes.
-       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> operation =
+       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
          client.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
-       for (VideoAnnotationResults results : operation.get().getAnnotationResultsList()) {
+       for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
        // process video / segment level label annotations
        System.out.println("Locations: ");
        for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
@@ -335,7 +332,7 @@ public static void analyzeLabelsFile(String filePath) throws Exception {
   */
  public static void analyzeShots(String gcsUri) throws Exception {
    // [START detect_shots]
-     // Instantiate a com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient
+     // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      // Provide path to file hosted on GCS as "gs://bucket-name/..."
      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
@@ -344,12 +341,12 @@ public static void analyzeShots(String gcsUri) throws Exception {
          .build();

      // Create an operation that will contain the response when the operation completes.
-       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> operation =
+       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
          client.annotateVideoAsync(request);
-       System.out.println("Waiting for operation to complete...");

+       System.out.println("Waiting for operation to complete...");
      // Print detected shot changes and their location ranges in the analyzed video.
-       for (VideoAnnotationResults result : operation.get().getAnnotationResultsList()) {
+       for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
        if (result.getShotAnnotationsCount() > 0) {
          System.out.println("Shots: ");
          for (VideoSegment segment : result.getShotAnnotationsList()) {
@@ -374,21 +371,20 @@ public static void analyzeShots(String gcsUri) throws Exception {
   */
  public static void analyzeExplicitContent(String gcsUri) throws Exception {
    // [START detect_explicit_content]
-     // Instantiate a com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient
+     // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      // Create an operation that will contain the response when the operation completes.
      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
          .setInputUri(gcsUri)
          .addFeatures(Feature.EXPLICIT_CONTENT_DETECTION)
          .build();

-       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> operation =
+       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
          client.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
-
      // Print detected annotations and their positions in the analyzed video.
-       for (VideoAnnotationResults result : operation.get().getAnnotationResultsList()) {
+       for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
        for (ExplicitContentFrame frame : result.getExplicitAnnotation().getFramesList()) {
          double frameTime =
              frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
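
For reference, a minimal self-contained sketch of the v1 call pattern these samples migrate to, assuming the google-cloud-video-intelligence (v1) client library is on the classpath. The class name LabelDetectionSketch and the gs:// URI are hypothetical placeholders, not part of the change.

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.LabelAnnotation;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;

public class LabelDetectionSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder GCS URI; replace with a real bucket/object.
    String gcsUri = "gs://YOUR_BUCKET/YOUR_VIDEO.mp4";
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
          .setInputUri(gcsUri)
          .addFeatures(Feature.LABEL_DETECTION)
          .build();
      // annotateVideoAsync returns a long-running operation future; get() blocks until done.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
          client.annotateVideoAsync(request);
      System.out.println("Waiting for operation to complete...");
      for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
        for (LabelAnnotation label : results.getSegmentLabelAnnotationsList()) {
          System.out.println("Label: " + label.getEntity().getDescription());
        }
      }
    }
  }
}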