|
13 | 13 | "uid":"personalize-2018-05-22"
|
14 | 14 | },
|
15 | 15 | "operations":{
|
| 16 | + "CreateBatchInferenceJob":{ |
| 17 | + "name":"CreateBatchInferenceJob", |
| 18 | + "http":{ |
| 19 | + "method":"POST", |
| 20 | + "requestUri":"/" |
| 21 | + }, |
| 22 | + "input":{"shape":"CreateBatchInferenceJobRequest"}, |
| 23 | + "output":{"shape":"CreateBatchInferenceJobResponse"}, |
| 24 | + "errors":[ |
| 25 | + {"shape":"InvalidInputException"}, |
| 26 | + {"shape":"ResourceAlreadyExistsException"}, |
| 27 | + {"shape":"LimitExceededException"}, |
| 28 | + {"shape":"ResourceNotFoundException"}, |
| 29 | + {"shape":"ResourceInUseException"} |
| 30 | + ], |
| 31 | + "documentation":"<p>Creates a batch inference job. The operation can handle up to 50 million records and the input file must be in JSON format. For more information, see <a>recommendations-batch</a>.</p>" |
| 32 | + }, |
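The request members this operation takes (jobName, solutionVersionArn, jobInput, jobOutput, roleArn, and the optional numResults) map directly onto the SDK clients generated from this model. A minimal sketch using boto3, assuming a Python client generated from this model; the ARNs and S3 paths are placeholders, not real resources:

import boto3

# Client generated from the personalize-2018-05-22 model above.
personalize = boto3.client("personalize")

# Placeholder ARNs and S3 paths -- substitute your own resources.
response = personalize.create_batch_inference_job(
    jobName="example-batch-job",                       # shape: Name
    solutionVersionArn="arn:aws:personalize:us-east-1:123456789012:solution/example/version",
    jobInput={"s3DataSource": {"path": "s3://example-bucket/batch/input.json"}},
    jobOutput={"s3DataDestination": {"path": "s3://example-bucket/batch/output/"}},
    roleArn="arn:aws:iam::123456789012:role/PersonalizeBatchRole",
    numResults=25,                                      # optional NumBatchResults
)
print(response["batchInferenceJobArn"])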
16 | 33 | "CreateCampaign":{
|
17 | 34 | "name":"CreateCampaign",
|
18 | 35 | "http":{
|
|
252 | 269 | "documentation":"<p>Describes the given algorithm.</p>",
|
253 | 270 | "idempotent":true
|
254 | 271 | },
|
| 272 | + "DescribeBatchInferenceJob":{ |
| 273 | + "name":"DescribeBatchInferenceJob", |
| 274 | + "http":{ |
| 275 | + "method":"POST", |
| 276 | + "requestUri":"/" |
| 277 | + }, |
| 278 | + "input":{"shape":"DescribeBatchInferenceJobRequest"}, |
| 279 | + "output":{"shape":"DescribeBatchInferenceJobResponse"}, |
| 280 | + "errors":[ |
| 281 | + {"shape":"InvalidInputException"}, |
| 282 | + {"shape":"ResourceNotFoundException"} |
| 283 | + ], |
| 284 | + "documentation":"<p>Gets the properties of a batch inference job, including its name, Amazon Resource Name (ARN), status, input and output configurations, and the ARN of the solution version used to generate the recommendations.</p>", |
| 285 | + "idempotent":true |
| 286 | + }, |
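Because a batch inference job runs asynchronously, callers typically poll DescribeBatchInferenceJob until the status leaves PENDING / IN PROGRESS. A hedged boto3 sketch, reusing a hypothetical job ARN returned by the create call above:

import time
import boto3

personalize = boto3.client("personalize")

# Hypothetical ARN returned by create_batch_inference_job.
job_arn = "arn:aws:personalize:us-east-1:123456789012:batch-inference-job/example-batch-job"

while True:
    job = personalize.describe_batch_inference_job(
        batchInferenceJobArn=job_arn
    )["batchInferenceJob"]
    status = job["status"]          # PENDING, IN PROGRESS, ACTIVE, or CREATE FAILED
    print("status:", status)
    if status in ("ACTIVE", "CREATE FAILED"):
        break
    time.sleep(60)                  # batch jobs can take a while to complete

if status == "CREATE FAILED":
    print("failure reason:", job.get("failureReason"))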
255 | 287 | "DescribeCampaign":{
|
256 | 288 | "name":"DescribeCampaign",
|
257 | 289 | "http":{
|
|
417 | 449 | ],
|
418 | 450 | "documentation":"<p>Gets the metrics for the specified solution version.</p>"
|
419 | 451 | },
|
| 452 | + "ListBatchInferenceJobs":{ |
| 453 | + "name":"ListBatchInferenceJobs", |
| 454 | + "http":{ |
| 455 | + "method":"POST", |
| 456 | + "requestUri":"/" |
| 457 | + }, |
| 458 | + "input":{"shape":"ListBatchInferenceJobsRequest"}, |
| 459 | + "output":{"shape":"ListBatchInferenceJobsResponse"}, |
| 460 | + "errors":[ |
| 461 | + {"shape":"InvalidInputException"}, |
| 462 | + {"shape":"InvalidNextTokenException"} |
| 463 | + ], |
| 464 | + "documentation":"<p>Gets a list of the batch inference jobs that have been created from a solution version.</p>", |
| 465 | + "idempotent":true |
| 466 | + }, |
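ListBatchInferenceJobs is paginated through nextToken and maxResults. A sketch that walks every page of jobs for one solution version (the ARN is a placeholder):

import boto3

personalize = boto3.client("personalize")

solution_version_arn = "arn:aws:personalize:us-east-1:123456789012:solution/example/version"

jobs, token = [], None
while True:
    kwargs = {"solutionVersionArn": solution_version_arn, "maxResults": 100}
    if token:
        kwargs["nextToken"] = token
    page = personalize.list_batch_inference_jobs(**kwargs)
    jobs.extend(page.get("batchInferenceJobs", []))
    token = page.get("nextToken")
    if not token:                      # a null token means this was the last page
        break

for summary in jobs:
    print(summary["jobName"], summary["status"])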
420 | 467 | "ListCampaigns":{
|
421 | 468 | "name":"ListCampaigns",
|
422 | 469 | "http":{
|
|
671 | 718 | "type":"string",
|
672 | 719 | "max":10000
|
673 | 720 | },
|
| 721 | + "BatchInferenceJob":{ |
| 722 | + "type":"structure", |
| 723 | + "members":{ |
| 724 | + "jobName":{ |
| 725 | + "shape":"Name", |
| 726 | + "documentation":"<p>The name of the batch inference job.</p>" |
| 727 | + }, |
| 728 | + "batchInferenceJobArn":{ |
| 729 | + "shape":"Arn", |
| 730 | + "documentation":"<p>The Amazon Resource Name (ARN) of the batch inference job.</p>" |
| 731 | + }, |
| 732 | + "failureReason":{ |
| 733 | + "shape":"FailureReason", |
| 734 | + "documentation":"<p>If the batch inference job failed, the reason for the failure.</p>" |
| 735 | + }, |
| 736 | + "solutionVersionArn":{ |
| 737 | + "shape":"Arn", |
| 738 | + "documentation":"<p>The Amazon Resource Name (ARN) of the solution version from which the batch inference job was created.</p>" |
| 739 | + }, |
| 740 | + "numResults":{ |
| 741 | + "shape":"NumBatchResults", |
| 742 | + "documentation":"<p>The number of recommendations generated by the batch inference job. This number includes the error messages generated for failed input records.</p>" |
| 743 | + }, |
| 744 | + "jobInput":{ |
| 745 | + "shape":"BatchInferenceJobInput", |
| 746 | + "documentation":"<p>The Amazon S3 path that leads to the input data used to generate the batch inference job.</p>" |
| 747 | + }, |
| 748 | + "jobOutput":{ |
| 749 | + "shape":"BatchInferenceJobOutput", |
| 750 | + "documentation":"<p>The Amazon S3 bucket that contains the output data generated by the batch inference job.</p>" |
| 751 | + }, |
| 752 | + "roleArn":{ |
| 753 | + "shape":"RoleArn", |
| 754 | + "documentation":"<p>The ARN of the AWS Identity and Access Management (IAM) role that requested the batch inference job.</p>" |
| 755 | + }, |
| 756 | + "status":{ |
| 757 | + "shape":"Status", |
| 758 | + "documentation":"<p>The status of the batch inference job. The status is one of the following values:</p> <ul> <li> <p>PENDING</p> </li> <li> <p>IN PROGRESS</p> </li> <li> <p>ACTIVE</p> </li> <li> <p>CREATE FAILED</p> </li> </ul>" |
| 759 | + }, |
| 760 | + "creationDateTime":{ |
| 761 | + "shape":"Date", |
| 762 | + "documentation":"<p>The time at which the batch inference job was created.</p>" |
| 763 | + }, |
| 764 | + "lastUpdatedDateTime":{ |
| 765 | + "shape":"Date", |
| 766 | + "documentation":"<p>The time at which the batch inference job was last updated.</p>" |
| 767 | + } |
| 768 | + }, |
| 769 | + "documentation":"<p>Contains information on a batch inference job.</p>" |
| 770 | + }, |
| 771 | + "BatchInferenceJobInput":{ |
| 772 | + "type":"structure", |
| 773 | + "required":["s3DataSource"], |
| 774 | + "members":{ |
| 775 | + "s3DataSource":{ |
| 776 | + "shape":"S3DataConfig", |
| 777 | + "documentation":"<p>The URI of the Amazon S3 location that contains your input data. The Amazon S3 bucket must be in the same region as the API endpoint you are calling.</p>" |
| 778 | + } |
| 779 | + }, |
| 780 | + "documentation":"<p>The input configuration of a batch inference job.</p>" |
| 781 | + }, |
| 782 | + "BatchInferenceJobOutput":{ |
| 783 | + "type":"structure", |
| 784 | + "required":["s3DataDestination"], |
| 785 | + "members":{ |
| 786 | + "s3DataDestination":{ |
| 787 | + "shape":"S3DataConfig", |
| 788 | + "documentation":"<p>Information on the Amazon S3 bucket in which the batch inference job's output is stored.</p>" |
| 789 | + } |
| 790 | + }, |
| 791 | + "documentation":"<p>The output configuration parameters of a batch inference job.</p>" |
| 792 | + }, |
| 793 | + "BatchInferenceJobSummary":{ |
| 794 | + "type":"structure", |
| 795 | + "members":{ |
| 796 | + "batchInferenceJobArn":{ |
| 797 | + "shape":"Arn", |
| 798 | + "documentation":"<p>The Amazon Resource Name (ARN) of the batch inference job.</p>" |
| 799 | + }, |
| 800 | + "jobName":{ |
| 801 | + "shape":"Name", |
| 802 | + "documentation":"<p>The name of the batch inference job.</p>" |
| 803 | + }, |
| 804 | + "status":{ |
| 805 | + "shape":"Status", |
| 806 | + "documentation":"<p>The status of the batch inference job. The status is one of the following values:</p> <ul> <li> <p>PENDING</p> </li> <li> <p>IN PROGRESS</p> </li> <li> <p>ACTIVE</p> </li> <li> <p>CREATE FAILED</p> </li> </ul>" |
| 807 | + }, |
| 808 | + "creationDateTime":{ |
| 809 | + "shape":"Date", |
| 810 | + "documentation":"<p>The time at which the batch inference job was created.</p>" |
| 811 | + }, |
| 812 | + "lastUpdatedDateTime":{ |
| 813 | + "shape":"Date", |
| 814 | + "documentation":"<p>The time at which the batch inference job was last updated.</p>" |
| 815 | + }, |
| 816 | + "failureReason":{ |
| 817 | + "shape":"FailureReason", |
| 818 | + "documentation":"<p>If the batch inference job failed, the reason for the failure.</p>" |
| 819 | + } |
| 820 | + }, |
| 821 | + "documentation":"<p>A truncated version of the <a>BatchInferenceJob</a> datatype. The <a>ListBatchInferenceJobs</a> operation returns a list of batch inference job summaries.</p>" |
| 822 | + }, |
| 823 | + "BatchInferenceJobs":{ |
| 824 | + "type":"list", |
| 825 | + "member":{"shape":"BatchInferenceJobSummary"}, |
| 826 | + "max":100 |
| 827 | + }, |
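BatchInferenceJobInput points at a JSON input file in Amazon S3. A sketch of preparing and uploading such a file with boto3; the one-record-per-line layout and the userId key are assumptions for a user-personalization style recipe and are not defined by this model file, so verify the per-line schema against the Personalize documentation:

import json
import boto3

# Assumed per-line schema for a user-personalization recipe; other recipes
# use different keys (for example itemId).
user_ids = ["105", "106", "107"]
lines = [json.dumps({"userId": uid}) for uid in user_ids]

with open("batch-input.json", "w") as f:
    f.write("\n".join(lines) + "\n")

# Upload to the S3 path that jobInput.s3DataSource.path will point to
# (bucket and key are placeholders).
boto3.client("s3").upload_file("batch-input.json", "example-bucket", "batch/input.json")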
674 | 828 | "Boolean":{"type":"boolean"},
|
675 | 829 | "Campaign":{
|
676 | 830 | "type":"structure",
|
|
835 | 989 | "type":"double",
|
836 | 990 | "min":-1000000
|
837 | 991 | },
|
| 992 | + "CreateBatchInferenceJobRequest":{ |
| 993 | + "type":"structure", |
| 994 | + "required":[ |
| 995 | + "jobName", |
| 996 | + "solutionVersionArn", |
| 997 | + "jobInput", |
| 998 | + "jobOutput", |
| 999 | + "roleArn" |
| 1000 | + ], |
| 1001 | + "members":{ |
| 1002 | + "jobName":{ |
| 1003 | + "shape":"Name", |
| 1004 | + "documentation":"<p>The name of the batch inference job to create.</p>" |
| 1005 | + }, |
| 1006 | + "solutionVersionArn":{ |
| 1007 | + "shape":"Arn", |
| 1008 | + "documentation":"<p>The Amazon Resource Name (ARN) of the solution version that will be used to generate the batch inference recommendations.</p>" |
| 1009 | + }, |
| 1010 | + "numResults":{ |
| 1011 | + "shape":"NumBatchResults", |
| 1012 | + "documentation":"<p>The number of recommendations to retrieve.</p>" |
| 1013 | + }, |
| 1014 | + "jobInput":{ |
| 1015 | + "shape":"BatchInferenceJobInput", |
| 1016 | + "documentation":"<p>The Amazon S3 path that leads to the input file to base your recommendations on. The input file must be in JSON format.</p>" |
| 1017 | + }, |
| 1018 | + "jobOutput":{ |
| 1019 | + "shape":"BatchInferenceJobOutput", |
| 1020 | + "documentation":"<p>The path to the Amazon S3 bucket where the job's output will be stored.</p>" |
| 1021 | + }, |
| 1022 | + "roleArn":{ |
| 1023 | + "shape":"RoleArn", |
| 1024 | + "documentation":"<p>The ARN of the AWS Identity and Access Management (IAM) role that has permissions to read and write to your input and output Amazon S3 buckets, respectively.</p>" |
| 1025 | + } |
| 1026 | + } |
| 1027 | + }, |
| 1028 | + "CreateBatchInferenceJobResponse":{ |
| 1029 | + "type":"structure", |
| 1030 | + "members":{ |
| 1031 | + "batchInferenceJobArn":{ |
| 1032 | + "shape":"Arn", |
| 1033 | + "documentation":"<p>The ARN of the batch inference job.</p>" |
| 1034 | + } |
| 1035 | + } |
| 1036 | + }, |
838 | 1037 | "CreateCampaignRequest":{
|
839 | 1038 | "type":"structure",
|
840 | 1039 | "required":[
|
|
1550 | 1749 | }
|
1551 | 1750 | }
|
1552 | 1751 | },
|
| 1752 | + "DescribeBatchInferenceJobRequest":{ |
| 1753 | + "type":"structure", |
| 1754 | + "required":["batchInferenceJobArn"], |
| 1755 | + "members":{ |
| 1756 | + "batchInferenceJobArn":{ |
| 1757 | + "shape":"Arn", |
| 1758 | + "documentation":"<p>The ARN of the batch inference job to describe.</p>" |
| 1759 | + } |
| 1760 | + } |
| 1761 | + }, |
| 1762 | + "DescribeBatchInferenceJobResponse":{ |
| 1763 | + "type":"structure", |
| 1764 | + "members":{ |
| 1765 | + "batchInferenceJob":{ |
| 1766 | + "shape":"BatchInferenceJob", |
| 1767 | + "documentation":"<p>Information on the specified batch inference job.</p>" |
| 1768 | + } |
| 1769 | + } |
| 1770 | + }, |
1553 | 1771 | "DescribeCampaignRequest":{
|
1554 | 1772 | "type":"structure",
|
1555 | 1773 | "required":["campaignArn"],
|
|
2027 | 2245 | "documentation":"<p>The limit on the number of requests per second has been exceeded.</p>",
|
2028 | 2246 | "exception":true
|
2029 | 2247 | },
|
| 2248 | + "ListBatchInferenceJobsRequest":{ |
| 2249 | + "type":"structure", |
| 2250 | + "members":{ |
| 2251 | + "solutionVersionArn":{ |
| 2252 | + "shape":"Arn", |
| 2253 | + "documentation":"<p>The Amazon Resource Name (ARN) of the solution version from which the batch inference jobs were created.</p>" |
| 2254 | + }, |
| 2255 | + "nextToken":{ |
| 2256 | + "shape":"NextToken", |
| 2257 | + "documentation":"<p>The token to request the next page of results.</p>" |
| 2258 | + }, |
| 2259 | + "maxResults":{ |
| 2260 | + "shape":"MaxResults", |
| 2261 | + "documentation":"<p>The maximum number of batch inference job results to return in each page. The default value is 100.</p>" |
| 2262 | + } |
| 2263 | + } |
| 2264 | + }, |
| 2265 | + "ListBatchInferenceJobsResponse":{ |
| 2266 | + "type":"structure", |
| 2267 | + "members":{ |
| 2268 | + "batchInferenceJobs":{ |
| 2269 | + "shape":"BatchInferenceJobs", |
| 2270 | + "documentation":"<p>A list containing information on each job that is returned.</p>" |
| 2271 | + }, |
| 2272 | + "nextToken":{ |
| 2273 | + "shape":"NextToken", |
| 2274 | + "documentation":"<p>The token to use to retrieve the next page of results. The value is <code>null</code> when there are no more results to return.</p>" |
| 2275 | + } |
| 2276 | + } |
| 2277 | + }, |
2030 | 2278 | "ListCampaignsRequest":{
|
2031 | 2279 | "type":"structure",
|
2032 | 2280 | "members":{
|
|
2319 | 2567 | "type":"string",
|
2320 | 2568 | "max":1300
|
2321 | 2569 | },
|
| 2570 | + "NumBatchResults":{"type":"integer"}, |
2322 | 2571 | "ParameterName":{
|
2323 | 2572 | "type":"string",
|
2324 | 2573 | "max":256
|
|
2445 | 2694 | "max":256,
|
2446 | 2695 | "pattern":"arn:([a-z\\d-]+):iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+"
|
2447 | 2696 | },
|
| 2697 | + "S3DataConfig":{ |
| 2698 | + "type":"structure", |
| 2699 | + "required":["path"], |
| 2700 | + "members":{ |
| 2701 | + "path":{ |
| 2702 | + "shape":"S3Location", |
| 2703 | + "documentation":"<p>The file path of the Amazon S3 bucket.</p>" |
| 2704 | + }, |
| 2705 | + "kmsKeyArn":{ |
| 2706 | + "shape":"KmsKeyArn", |
| 2707 | + "documentation":"<p>The Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.</p>" |
| 2708 | + } |
| 2709 | + }, |
| 2710 | + "documentation":"<p>The configuration details of an Amazon S3 input or output bucket.</p>" |
| 2711 | + }, |
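S3DataConfig is shared by both s3DataSource and s3DataDestination, and the optional kmsKeyArn covers buckets whose objects are encrypted with a customer-managed key. A sketch of the two dicts as they would be passed to the create call shown earlier (all paths and ARNs are placeholders):

# Both the input source and the output destination reuse the S3DataConfig shape.
job_input = {
    "s3DataSource": {
        "path": "s3://example-bucket/batch/input.json",
        # Optional: only needed when the objects are encrypted with a
        # customer-managed KMS key (placeholder ARN).
        "kmsKeyArn": "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555",
    }
}
job_output = {
    "s3DataDestination": {
        "path": "s3://example-bucket/batch/output/",
        "kmsKeyArn": "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555",
    }
}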
2448 | 2712 | "S3Location":{
|
2449 | 2713 | "type":"string",
|
2450 | 2714 | "max":256
|
|
2521 | 2785 | },
|
2522 | 2786 | "hpoConfig":{
|
2523 | 2787 | "shape":"HPOConfig",
|
2524 | | - "documentation":"<p>Describes the properties for hyperparameter optimization (HPO). For use with the bring-your-own-recipe feature. Not used with Amazon Personalize predefined recipes.</p>" |
| 2788 | + "documentation":"<p>Describes the properties for hyperparameter optimization (HPO).</p>" |
2525 | 2789 | },
|
2526 | 2790 | "algorithmHyperParameters":{
|
2527 | 2791 | "shape":"HyperParameters",
|
|