@@ -401,6 +401,25 @@
 }
 }
 },
+"aisafety": {
+"methods": {
+"classifyContent": {
+"description": "Analyze a piece of content with the provided set of policies.",
+"flatPath": "v1alpha/aisafety:classifyContent",
+"httpMethod": "POST",
+"id": "checks.aisafety.classifyContent",
+"parameterOrder": [],
+"parameters": {},
+"path": "v1alpha/aisafety:classifyContent",
+"request": {
+"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequest"
+},
+"response": {
+"$ref": "GoogleChecksAisafetyV1alphaClassifyContentResponse"
+}
+}
+}
+},
 "media": {
 "methods": {
 "upload": {
@@ -444,7 +463,7 @@
 }
 }
 },
-"revision": "20241025",
+"revision": "20241029",
 "rootUrl": "https://checks.googleapis.com/",
 "schemas": {
 "CancelOperationRequest": {
@@ -492,6 +511,185 @@
 },
 "type": "object"
 },
+"GoogleChecksAisafetyV1alphaClassifyContentRequest": {
+"description": "Request proto for ClassifyContent RPC.",
+"id": "GoogleChecksAisafetyV1alphaClassifyContentRequest",
+"properties": {
+"classifierVersion": {
+"description": "Optional. Version of the classifier to use. If not specified, the latest version will be used.",
+"enum": [
+"CLASSIFIER_VERSION_UNSPECIFIED",
+"STABLE",
+"LATEST"
+],
+"enumDescriptions": [
+"Unspecified version.",
+"Stable version.",
+"Latest version."
+],
+"type": "string"
+},
+"context": {
+"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestContext",
+"description": "Optional. Context about the input that will be used to help on the classification."
+},
+"input": {
+"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent",
+"description": "Required. Content to be classified."
+},
+"policies": {
+"description": "Required. List of policies to classify against.",
+"items": {
+"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig"
+},
+"type": "array"
+}
+},
+"type": "object"
+},
+"GoogleChecksAisafetyV1alphaClassifyContentRequestContext": {
+"description": "Context about the input that will be used to help on the classification.",
+"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestContext",
+"properties": {
+"prompt": {
+"description": "Optional. Prompt that generated the model response.",
+"type": "string"
+}
+},
+"type": "object"
+},
+"GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent": {
+"description": "Content to be classified.",
+"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent",
+"properties": {
+"textInput": {
+"$ref": "GoogleChecksAisafetyV1alphaTextInput",
+"description": "Content in text format."
+}
+},
+"type": "object"
+},
+"GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig": {
+"description": "List of policies to classify against.",
+"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig",
+"properties": {
+"policyType": {
+"description": "Required. Type of the policy.",
+"enum": [
+"POLICY_TYPE_UNSPECIFIED",
+"DANGEROUS_CONTENT",
+"PII_SOLICITING_RECITING",
+"HARASSMENT",
+"SEXUALLY_EXPLICIT",
+"HATE_SPEECH",
+"MEDICAL_INFO",
+"VIOLENCE_AND_GORE",
+"OBSCENITY_AND_PROFANITY"
+],
+"enumDescriptions": [
+"Default.",
+"The model facilitates, promotes or enables access to harmful goods, services, and activities.",
+"The model reveals an individual\u2019s personal information and data.",
+"The model generates content that is malicious, intimidating, bullying, or abusive towards another individual.",
+"The model generates content that is sexually explicit in nature.",
+"The model promotes violence, hatred, discrimination on the basis of race, religion, etc.",
+"The model facilitates harm by providing health advice or guidance.",
+"The model generates content that contains gratuitous, realistic descriptions of violence or gore.",
+""
+],
+"type": "string"
+},
+"threshold": {
+"description": "Optional. Score threshold to use when deciding if the content is violative or non-violative. If not specified, the default 0.5 threshold for the policy will be used.",
+"format": "float",
+"type": "number"
+}
+},
+"type": "object"
+},
+"GoogleChecksAisafetyV1alphaClassifyContentResponse": {
+"description": "Response proto for ClassifyContent RPC.",
+"id": "GoogleChecksAisafetyV1alphaClassifyContentResponse",
+"properties": {
+"policyResults": {
+"description": "Results of the classification for each policy.",
+"items": {
+"$ref": "GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult"
+},
+"type": "array"
+}
+},
+"type": "object"
+},
+"GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult": {
+"description": "Result for one policy against the corresponding input.",
+"id": "GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult",
+"properties": {
+"policyType": {
+"description": "Type of the policy.",
+"enum": [
+"POLICY_TYPE_UNSPECIFIED",
+"DANGEROUS_CONTENT",
+"PII_SOLICITING_RECITING",
+"HARASSMENT",
+"SEXUALLY_EXPLICIT",
+"HATE_SPEECH",
+"MEDICAL_INFO",
+"VIOLENCE_AND_GORE",
+"OBSCENITY_AND_PROFANITY"
+],
+"enumDescriptions": [
+"Default.",
+"The model facilitates, promotes or enables access to harmful goods, services, and activities.",
+"The model reveals an individual\u2019s personal information and data.",
+"The model generates content that is malicious, intimidating, bullying, or abusive towards another individual.",
+"The model generates content that is sexually explicit in nature.",
+"The model promotes violence, hatred, discrimination on the basis of race, religion, etc.",
+"The model facilitates harm by providing health advice or guidance.",
+"The model generates content that contains gratuitous, realistic descriptions of violence or gore.",
+""
+],
+"type": "string"
+},
+"score": {
+"description": "Final score for the results of this policy.",
+"format": "float",
+"type": "number"
+},
+"violationResult": {
+"description": "Result of the classification for the policy.",
+"enum": [
+"VIOLATION_RESULT_UNSPECIFIED",
+"VIOLATIVE",
+"NON_VIOLATIVE",
+"CLASSIFICATION_ERROR"
+],
+"enumDescriptions": [
+"Unspecified result.",
+"The final score is greater or equal the input score threshold.",
+"The final score is smaller than the input score threshold.",
+"There was an error and the violation result could not be determined."
+],
+"type": "string"
+},
+},
+"type": "object"
+},
+"GoogleChecksAisafetyV1alphaTextInput": {
+"description": "Text input to be classified.",
+"id": "GoogleChecksAisafetyV1alphaTextInput",
+"properties": {
+"content": {
+"description": "Actual piece of text to be classified.",
+"type": "string"
+},
+"languageCode": {
+"description": "Optional. Language of the text in ISO 639-1 format. If the language is invalid or not specified, the system will try to detect it.",
+"type": "string"
+}
+},
+"type": "object"
+},
 "GoogleChecksReportV1alphaAnalyzeUploadRequest": {
 "description": "The request message for ReportService.AnalyzeUpload.",
 "id": "GoogleChecksReportV1alphaAnalyzeUploadRequest",
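For context, a minimal sketch of what calling the new aisafety.classifyContent method could look like through the discovery-based Python client (google-api-python-client). The method id and the request/response field names come from the discovery document above; everything else — the build("checks", "v1alpha") call, reliance on Application Default Credentials, and the example strings and thresholds — is an assumption for illustration, not part of this change.

```python
from googleapiclient.discovery import build

# Assumption: the v1alpha Checks surface is available to the client library
# and Application Default Credentials are configured for the environment.
service = build("checks", "v1alpha")

# Request body mirroring GoogleChecksAisafetyV1alphaClassifyContentRequest.
body = {
    "input": {
        "textInput": {
            "content": "Model output we want to screen.",  # example text
            "languageCode": "en",  # ISO 639-1; auto-detected if omitted
        }
    },
    "context": {"prompt": "Prompt that generated the model response."},
    "classifierVersion": "STABLE",
    "policies": [
        # Omitting "threshold" falls back to the policy's default of 0.5.
        {"policyType": "DANGEROUS_CONTENT"},
        {"policyType": "HARASSMENT", "threshold": 0.7},
    ],
}

response = service.aisafety().classifyContent(body=body).execute()

# Each policyResult carries policyType, a float score, and a violationResult
# of VIOLATIVE, NON_VIOLATIVE, or CLASSIFICATION_ERROR.
for result in response.get("policyResults", []):
    print(result["policyType"], result.get("score"), result["violationResult"])
```

Per the violationResult enum above, a policy is reported VIOLATIVE when its final score meets or exceeds the supplied threshold (or the 0.5 default), and NON_VIOLATIVE otherwise.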