Skip to content

Commit 08ac2fb

Browse files
nnegreychingor13
authored and committed
samples: Add video v1p1beta samples for face detection and video transcription (#1070)
* Add video v1p1beta samples for face detection and video transcription * Update based on Feedback * Clean up READMEs * Add timeout for tests
1 parent 75e79b2 commit 08ac2fb

File tree

2 files changed

+3
-91
lines changed

2 files changed

+3
-91
lines changed

video/src/main/java/com/example/video/Detect.java

Lines changed: 3 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -22,13 +22,9 @@
2222
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
2323
import com.google.cloud.videointelligence.v1.Entity;
2424
import com.google.cloud.videointelligence.v1.ExplicitContentFrame;
25-
import com.google.cloud.videointelligence.v1.FaceAnnotation;
26-
import com.google.cloud.videointelligence.v1.FaceFrame;
27-
import com.google.cloud.videointelligence.v1.FaceSegment;
2825
import com.google.cloud.videointelligence.v1.Feature;
2926
import com.google.cloud.videointelligence.v1.LabelAnnotation;
3027
import com.google.cloud.videointelligence.v1.LabelSegment;
31-
import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
3228
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
3329
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
3430
import com.google.cloud.videointelligence.v1.VideoSegment;
@@ -42,12 +38,10 @@
4238

4339
public class Detect {
4440
/**
45-
* Detects entities,sentiment and syntax in a document using the Natural Language API.
41+
* Detects labels, shots, and explicit content in a video using the Video Intelligence API
4642
* @param args specifies features to detect and the path to the video on Google Cloud Storage.
47-
*
48-
* @throws IOException on Input/Output errors.
4943
*/
50-
public static void main(String[] args) throws Exception {
44+
public static void main(String[] args) {
5145
try {
5246
argsHelper(args);
5347
} catch (Exception e) {
@@ -68,7 +62,7 @@ public static void argsHelper(String[] args) throws Exception {
6862
System.out.printf(
6963
"\tjava %s \"<command>\" \"<path-to-video>\"\n"
7064
+ "Commands:\n"
71-
+ "\tfaces | labels | shots\n"
65+
+ "\tlabels | shots\n"
7266
+ "Path:\n\tA URI for a Cloud Storage resource (gs://...)\n"
7367
+ "Examples: ",
7468
Detect.class.getCanonicalName());
@@ -77,9 +71,6 @@ public static void argsHelper(String[] args) throws Exception {
7771
String command = args[0];
7872
String path = args.length > 1 ? args[1] : "";
7973

80-
if (command.equals("faces")) {
81-
analyzeFaces(path);
82-
}
8374
if (command.equals("labels")) {
8475
analyzeLabels(path);
8576
}
@@ -94,68 +85,6 @@ public static void argsHelper(String[] args) throws Exception {
9485
}
9586
}
9687

97-
/**
98-
* Performs facial analysis on the video at the provided Cloud Storage path.
99-
*
100-
* @param gcsUri the path to the video file to analyze.
101-
*/
102-
public static void analyzeFaces(String gcsUri) throws Exception {
103-
// [START detect_faces]
104-
// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
105-
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
106-
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
107-
.setInputUri(gcsUri)
108-
.addFeatures(Feature.FACE_DETECTION)
109-
.build();
110-
111-
// asynchronously perform facial analysis on videos
112-
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
113-
client.annotateVideoAsync(request);
114-
115-
System.out.println("Waiting for operation to complete...");
116-
boolean faceFound = false;
117-
for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
118-
int faceCount = 0;
119-
for (FaceAnnotation faceAnnotation : results.getFaceAnnotationsList()) {
120-
faceFound = true;
121-
System.out.println("Face: " + ++faceCount);
122-
System.out.println("Thumbnail size: " + faceAnnotation.getThumbnail().size());
123-
for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
124-
double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
125-
+ segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
126-
double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
127-
+ segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
128-
System.out.printf("Segment location : %.3f:%.3f\n", startTime, endTime);
129-
}
130-
try {
131-
// printing info on the first frame
132-
if (faceAnnotation.getFramesCount() > 0) {
133-
System.out.println(faceAnnotation.getFramesList().get(0));
134-
FaceFrame frame = faceAnnotation.getFrames(0);
135-
double timeOffset = frame.getTimeOffset().getSeconds()
136-
+ frame.getTimeOffset().getNanos() / 1e9;
137-
System.out.printf("First frame time offset: %.3fs", timeOffset);
138-
// print info on the first normalized bounding box
139-
NormalizedBoundingBox box = frame.getNormalizedBoundingBoxesList().get(0);
140-
System.out.printf("Left: %.3f\n", box.getLeft());
141-
System.out.printf("Top: %.3f\n", box.getTop());
142-
System.out.printf("Bottom: %.3f\n", box.getBottom());
143-
System.out.printf("Right: %.3f\n", box.getRight());
144-
} else {
145-
System.out.println("No frames found in annotation");
146-
}
147-
} catch (IndexOutOfBoundsException ioe) {
148-
System.out.println("Could not retrieve frame: " + ioe.getMessage());
149-
}
150-
}
151-
}
152-
if (!faceFound) {
153-
System.out.println("No faces detected in " + gcsUri);
154-
}
155-
// [END detect_faces]
156-
}
157-
}
158-
15988
/**
16089
* Performs label analysis on the video at the provided Cloud Storage path.
16190
*

video/src/test/java/com/example/video/DetectIT.java

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,6 @@ public class DetectIT {
3333
private ByteArrayOutputStream bout;
3434
private PrintStream out;
3535

36-
static final String FACES_FILE_LOCATION = "gs://demomaker/gbike.mp4";
3736
static final String LABEL_FILE_LOCATION = "gs://demomaker/cat.mp4";
3837
static final String SHOTS_FILE_LOCATION = "gs://demomaker/gbikes_dinosaur.mp4";
3938
static final String EXPLICIT_CONTENT_LOCATION = "gs://demomaker/cat.mp4";
@@ -50,22 +49,6 @@ public void tearDown() {
5049
System.setOut(null);
5150
}
5251

53-
@Test
54-
public void testFaces() throws Exception {
55-
String[] args = {"faces", FACES_FILE_LOCATION};
56-
Detect.argsHelper(args);
57-
String got = bout.toString();
58-
// Model changes have caused the results from face detection to change to an
59-
// empty response (e.g. no faces detected) so we check either for an empty
60-
// response or that a response with face thumbnails was returned.
61-
if (got.indexOf("No faces detected") == -1) {
62-
assertThat(got).contains("Thumbnail size:");
63-
} else {
64-
// No faces detected, verify sample reports this.
65-
assertThat(got).contains("No faces detected in " + FACES_FILE_LOCATION);
66-
}
67-
}
68-
6952
@Test
7053
public void testLabels() throws Exception {
7154
String[] args = {"labels", LABEL_FILE_LOCATION};

0 commit comments

Comments
 (0)