Commit f795a25
Author: awstools
Parent: 0ecd09f

feat(client-rekognition): This release adds support for tagging projects and datasets with the CreateProject and CreateDataset APIs.

5 files changed: 36 additions, 4 deletions

clients/client-rekognition/src/commands/CreateDatasetCommand.ts

Lines changed: 3 additions & 0 deletions

@@ -69,6 +69,9 @@ export interface CreateDatasetCommandOutput extends CreateDatasetResponse, __Met
  *   },
  *   DatasetType: "TRAIN" || "TEST", // required
  *   ProjectArn: "STRING_VALUE", // required
+ *   Tags: { // TagMap
+ *     "<keys>": "STRING_VALUE",
+ *   },
  * };
  * const command = new CreateDatasetCommand(input);
  * const response = await client.send(command);
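For context, a minimal sketch of how the new Tags parameter could be passed when creating a dataset. The region, project ARN, and tag values below are placeholders for illustration, not values from this commit.

```typescript
import { RekognitionClient, CreateDatasetCommand } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({ region: "us-east-1" });

const command = new CreateDatasetCommand({
  DatasetType: "TRAIN",
  // Placeholder ARN of an existing Custom Labels project.
  ProjectArn: "arn:aws:rekognition:us-east-1:111122223333:project/example-project/1234567890123",
  // New in this release: tags attached at dataset creation time.
  Tags: {
    team: "vision",
    environment: "dev",
  },
});

const response = await client.send(command);
console.log(response.DatasetArn);
```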

clients/client-rekognition/src/commands/CreateProjectCommand.ts

Lines changed: 3 additions & 0 deletions

@@ -44,6 +44,9 @@ export interface CreateProjectCommandOutput extends CreateProjectResponse, __Met
  *   ProjectName: "STRING_VALUE", // required
  *   Feature: "CONTENT_MODERATION" || "CUSTOM_LABELS",
  *   AutoUpdate: "ENABLED" || "DISABLED",
+ *   Tags: { // TagMap
+ *     "<keys>": "STRING_VALUE",
+ *   },
  * };
  * const command = new CreateProjectCommand(input);
  * const response = await client.send(command);
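Similarly, a hedged sketch of CreateProjectCommand with the new Tags field; the project name, Feature, AutoUpdate, and tag keys are illustrative choices, not values mandated by the commit.

```typescript
import { RekognitionClient, CreateProjectCommand } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({ region: "us-east-1" });

const command = new CreateProjectCommand({
  ProjectName: "example-moderation-project", // placeholder name
  Feature: "CONTENT_MODERATION",
  AutoUpdate: "ENABLED",
  // New in this release: tags attached at project creation time.
  Tags: {
    "cost-center": "12345",
    owner: "ml-platform",
  },
});

const { ProjectArn } = await client.send(command);
console.log(ProjectArn);
```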

clients/client-rekognition/src/commands/GetLabelDetectionCommand.ts

Lines changed: 2 additions & 0 deletions

@@ -94,6 +94,8 @@ export interface GetLabelDetectionCommandOutput extends GetLabelDetectionRespons
  *  next set of results. To get the next page of results, call <code>GetlabelDetection</code> and
  *  populate the <code>NextToken</code> request parameter with the token value returned from the
  *  previous call to <code>GetLabelDetection</code>.</p>
+ *  <p>If you are retrieving results while using the Amazon Simple Notification Service, note that you will receive an
+ *  "ERROR" notification if the job encounters an issue.</p>
  * @example
  * Use a bare-bones client and the command you need to make an API call.
  * ```javascript
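Since the added documentation concerns SNS status notifications and NextToken paging, here is a rough sketch of draining GetLabelDetection results page by page. The JobId is a placeholder, and the loop shape is an assumption about typical usage rather than code from this commit.

```typescript
import { RekognitionClient, GetLabelDetectionCommand } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({ region: "us-east-1" });
const jobId = "placeholder-job-id-from-StartLabelDetection";

let nextToken: string | undefined;
do {
  const page = await client.send(
    new GetLabelDetectionCommand({
      JobId: jobId,
      MaxResults: 1000,
      AggregateBy: "TIMESTAMPS",
      NextToken: nextToken,
    })
  );
  // A FAILED status here corresponds to the "ERROR" SNS notification mentioned above.
  if (page.JobStatus === "FAILED") {
    throw new Error(page.StatusMessage ?? "Label detection job failed");
  }
  for (const detection of page.Labels ?? []) {
    console.log(detection.Timestamp, detection.Label?.Name);
  }
  nextToken = page.NextToken;
} while (nextToken);
```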

clients/client-rekognition/src/models/models_0.ts

Lines changed: 14 additions & 2 deletions

@@ -2370,6 +2370,12 @@ export interface CreateDatasetRequest {
    * @public
    */
   ProjectArn: string | undefined;
+
+  /**
+   * <p>A set of tags (key-value pairs) that you want to attach to the dataset.</p>
+   * @public
+   */
+  Tags?: Record<string, string>;
 }

 /**
@@ -2526,6 +2532,12 @@ export interface CreateProjectRequest {
    * @public
    */
   AutoUpdate?: ProjectAutoUpdate;
+
+  /**
+   * <p>A set of tags (key-value pairs) that you want to attach to the project.</p>
+   * @public
+   */
+  Tags?: Record<string, string>;
 }

 /**
@@ -5015,8 +5027,8 @@ export interface HumanLoopActivationOutput {
  */
 export interface DetectModerationLabelsResponse {
   /**
-   * <p>Array of detected Moderation labels and the time, in milliseconds from the start of the
-   * video, they were detected.</p>
+   * <p>Array of detected Moderation labels. For video operations, this includes the time,
+   * in milliseconds from the start of the video, they were detected.</p>
    * @public
    */
   ModerationLabels?: ModerationLabel[];
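Both request shapes type the new field as Record<string, string>, so a plain string-to-string object satisfies either interface. A small sketch under that assumption; the defaultTags helper and its tag values are hypothetical, not part of the SDK.

```typescript
import type { CreateDatasetRequest, CreateProjectRequest } from "@aws-sdk/client-rekognition";

// Hypothetical helper: merge team-wide default tags with call-site tags.
// Both CreateProjectRequest["Tags"] and CreateDatasetRequest["Tags"] are Record<string, string>.
function defaultTags(extra: Record<string, string> = {}): Record<string, string> {
  return { "managed-by": "example-pipeline", ...extra };
}

const projectInput: CreateProjectRequest = {
  ProjectName: "example-project",
  Tags: defaultTags({ stage: "test" }),
};

const datasetInput: CreateDatasetRequest = {
  DatasetType: "TRAIN",
  ProjectArn: "arn:aws:rekognition:us-east-1:111122223333:project/example-project/1234567890123",
  Tags: defaultTags(),
};
```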

codegen/sdk-codegen/aws-models/rekognition.json

Lines changed: 14 additions & 2 deletions

@@ -1546,6 +1546,12 @@
             "smithy.api#documentation": "<p>\nThe ARN of the Amazon Rekognition Custom Labels project to which you want to asssign the dataset.\n</p>",
             "smithy.api#required": {}
           }
+        },
+        "Tags": {
+          "target": "com.amazonaws.rekognition#TagMap",
+          "traits": {
+            "smithy.api#documentation": "<p>A set of tags (key-value pairs) that you want to attach to the dataset.</p>"
+          }
         }
       },
       "traits": {
@@ -1725,6 +1731,12 @@
           "traits": {
             "smithy.api#documentation": "<p>Specifies whether automatic retraining should be attempted for the versions of the\n project. Automatic retraining is done as a best effort. Required argument for Content\n Moderation. Applicable only to adapters.</p>"
           }
+        },
+        "Tags": {
+          "target": "com.amazonaws.rekognition#TagMap",
+          "traits": {
+            "smithy.api#documentation": "<p>A set of tags (key-value pairs) that you want to attach to the project.</p>"
+          }
         }
       },
       "traits": {
@@ -4318,7 +4330,7 @@
         "ModerationLabels": {
           "target": "com.amazonaws.rekognition#ModerationLabels",
           "traits": {
-            "smithy.api#documentation": "<p>Array of detected Moderation labels and the time, in milliseconds from the start of the\n video, they were detected.</p>"
+            "smithy.api#documentation": "<p>Array of detected Moderation labels. For video operations, this includes the time, \n in milliseconds from the start of the video, they were detected.</p>"
           }
         },
         "ModerationModelVersion": {
@@ -6298,7 +6310,7 @@
         }
       ],
       "traits": {
-        "smithy.api#documentation": "<p>Gets the label detection results of a Amazon Rekognition Video analysis started by <a>StartLabelDetection</a>. </p>\n <p>The label detection operation is started by a call to <a>StartLabelDetection</a> which returns a job identifier (<code>JobId</code>). When\n the label detection operation finishes, Amazon Rekognition publishes a completion status to the\n Amazon Simple Notification Service topic registered in the initial call to <code>StartlabelDetection</code>. </p>\n <p>To get the results of the label detection operation, first check that the status value\n published to the Amazon SNS topic is <code>SUCCEEDED</code>. If so, call <a>GetLabelDetection</a> and pass the job identifier (<code>JobId</code>) from the\n initial call to <code>StartLabelDetection</code>.</p>\n <p>\n <code>GetLabelDetection</code> returns an array of detected labels\n (<code>Labels</code>) sorted by the time the labels were detected. You can also sort by the\n label name by specifying <code>NAME</code> for the <code>SortBy</code> input parameter. If\n there is no <code>NAME</code> specified, the default sort is by\n timestamp.</p>\n <p>You can select how results are aggregated by using the <code>AggregateBy</code> input\n parameter. The default aggregation method is <code>TIMESTAMPS</code>. You can also aggregate\n by <code>SEGMENTS</code>, which aggregates all instances of labels detected in a given\n segment. </p>\n <p>The returned Labels array may include the following attributes:</p>\n <ul>\n <li>\n <p>Name - The name of the detected label.</p>\n </li>\n <li>\n <p>Confidence - The level of confidence in the label assigned to a detected object. </p>\n </li>\n <li>\n <p>Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical\n taxonomy of detected labels. For example, a detected car might be assigned the label car.\n The label car has two parent labels: Vehicle (its parent) and Transportation (its\n grandparent). The response includes the all ancestors for a label, where every ancestor is\n a unique label. In the previous example, Car, Vehicle, and Transportation are returned as\n unique labels in the response. </p>\n </li>\n <li>\n <p> Aliases - Possible Aliases for the label. </p>\n </li>\n <li>\n <p>Categories - The label categories that the detected label belongs to.</p>\n </li>\n <li>\n <p>BoundingBox — Bounding boxes are described for all instances of detected common object labels, \n returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing \n the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.</p>\n </li>\n <li>\n <p>Timestamp - Time, in milliseconds from the start of the video, that the label was detected.\n For aggregation by <code>SEGMENTS</code>, the <code>StartTimestampMillis</code>,\n <code>EndTimestampMillis</code>, and <code>DurationMillis</code> structures are what\n define a segment. Although the “Timestamp” structure is still returned with each label,\n its value is set to be the same as <code>StartTimestampMillis</code>.</p>\n </li>\n </ul>\n <p>Timestamp and Bounding box information are returned for detected Instances, only if\n aggregation is done by <code>TIMESTAMPS</code>. If aggregating by <code>SEGMENTS</code>,\n information about detected instances isn’t returned. </p>\n <p>The version of the label model used for the detection is also returned.</p>\n <p>\n <b>Note <code>DominantColors</code> isn't returned for <code>Instances</code>,\n although it is shown as part of the response in the sample seen below.</b>\n </p>\n <p>Use <code>MaxResults</code> parameter to limit the number of labels returned. If\n there are more results than specified in <code>MaxResults</code>, the value of\n <code>NextToken</code> in the operation response contains a pagination token for getting the\n next set of results. To get the next page of results, call <code>GetlabelDetection</code> and\n populate the <code>NextToken</code> request parameter with the token value returned from the\n previous call to <code>GetLabelDetection</code>.</p>",
+        "smithy.api#documentation": "<p>Gets the label detection results of a Amazon Rekognition Video analysis started by <a>StartLabelDetection</a>. </p>\n <p>The label detection operation is started by a call to <a>StartLabelDetection</a> which returns a job identifier (<code>JobId</code>). When\n the label detection operation finishes, Amazon Rekognition publishes a completion status to the\n Amazon Simple Notification Service topic registered in the initial call to <code>StartlabelDetection</code>. </p>\n <p>To get the results of the label detection operation, first check that the status value\n published to the Amazon SNS topic is <code>SUCCEEDED</code>. If so, call <a>GetLabelDetection</a> and pass the job identifier (<code>JobId</code>) from the\n initial call to <code>StartLabelDetection</code>.</p>\n <p>\n <code>GetLabelDetection</code> returns an array of detected labels\n (<code>Labels</code>) sorted by the time the labels were detected. You can also sort by the\n label name by specifying <code>NAME</code> for the <code>SortBy</code> input parameter. If\n there is no <code>NAME</code> specified, the default sort is by\n timestamp.</p>\n <p>You can select how results are aggregated by using the <code>AggregateBy</code> input\n parameter. The default aggregation method is <code>TIMESTAMPS</code>. You can also aggregate\n by <code>SEGMENTS</code>, which aggregates all instances of labels detected in a given\n segment. </p>\n <p>The returned Labels array may include the following attributes:</p>\n <ul>\n <li>\n <p>Name - The name of the detected label.</p>\n </li>\n <li>\n <p>Confidence - The level of confidence in the label assigned to a detected object. </p>\n </li>\n <li>\n <p>Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical\n taxonomy of detected labels. For example, a detected car might be assigned the label car.\n The label car has two parent labels: Vehicle (its parent) and Transportation (its\n grandparent). The response includes the all ancestors for a label, where every ancestor is\n a unique label. In the previous example, Car, Vehicle, and Transportation are returned as\n unique labels in the response. </p>\n </li>\n <li>\n <p> Aliases - Possible Aliases for the label. </p>\n </li>\n <li>\n <p>Categories - The label categories that the detected label belongs to.</p>\n </li>\n <li>\n <p>BoundingBox — Bounding boxes are described for all instances of detected common object labels, \n returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing \n the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.</p>\n </li>\n <li>\n <p>Timestamp - Time, in milliseconds from the start of the video, that the label was detected.\n For aggregation by <code>SEGMENTS</code>, the <code>StartTimestampMillis</code>,\n <code>EndTimestampMillis</code>, and <code>DurationMillis</code> structures are what\n define a segment. Although the “Timestamp” structure is still returned with each label,\n its value is set to be the same as <code>StartTimestampMillis</code>.</p>\n </li>\n </ul>\n <p>Timestamp and Bounding box information are returned for detected Instances, only if\n aggregation is done by <code>TIMESTAMPS</code>. If aggregating by <code>SEGMENTS</code>,\n information about detected instances isn’t returned. </p>\n <p>The version of the label model used for the detection is also returned.</p>\n <p>\n <b>Note <code>DominantColors</code> isn't returned for <code>Instances</code>,\n although it is shown as part of the response in the sample seen below.</b>\n </p>\n <p>Use <code>MaxResults</code> parameter to limit the number of labels returned. If\n there are more results than specified in <code>MaxResults</code>, the value of\n <code>NextToken</code> in the operation response contains a pagination token for getting the\n next set of results. To get the next page of results, call <code>GetlabelDetection</code> and\n populate the <code>NextToken</code> request parameter with the token value returned from the\n previous call to <code>GetLabelDetection</code>.</p>\n <p>If you are retrieving results while using the Amazon Simple Notification Service, note that you will receive an\n \"ERROR\" notification if the job encounters an issue.</p>",
         "smithy.api#paginated": {
           "inputToken": "NextToken",
           "outputToken": "NextToken",
