|
231 | 231 | "content":{
|
232 | 232 | "shape":"GuardrailContentBlockList",
|
233 | 233 | "documentation":"<p>The content details used in the request to apply the guardrail.</p>"
|
| 234 | + }, |
| 235 | + "outputScope":{ |
| 236 | + "shape":"GuardrailOutputScope", |
| 237 | + "documentation":"<p>Specifies the scope of the output that you get in the response. Set to <code>FULL</code> to return the entire output, including any detected and non-detected entries in the response for enhanced debugging.</p> <p>Note that the full output scope doesn't apply to word filters or regex in sensitive information filters. It does apply to all other filtering policies, including sensitive information with filters that can detect personally identifiable information (PII).</p>" |
234 | 238 | }
|
235 | 239 | }
|
236 | 240 | },
|
|
251 | 255 | "shape":"GuardrailAction",
|
252 | 256 | "documentation":"<p>The action taken in the response from the guardrail.</p>"
|
253 | 257 | },
|
| 258 | + "actionReason":{ |
| 259 | + "shape":"String", |
| 260 | + "documentation":"<p>The reason for the action taken when harmful content is detected.</p>" |
| 261 | + }, |
254 | 262 | "outputs":{
|
255 | 263 | "shape":"GuardrailOutputContentList",
|
256 | 264 | "documentation":"<p>The output details in the response from the guardrail.</p>"
|
|
392 | 400 | "min":0,
|
393 | 401 | "sensitive":true
|
394 | 402 | },
|
| 403 | + "Boolean":{ |
| 404 | + "type":"boolean", |
| 405 | + "box":true |
| 406 | + }, |
395 | 407 | "CachePointBlock":{
|
396 | 408 | "type":"structure",
|
397 | 409 | "required":["type"],
|
|
1111 | 1123 | "action":{
|
1112 | 1124 | "shape":"GuardrailContentPolicyAction",
|
1113 | 1125 | "documentation":"<p>The guardrail action.</p>"
|
| 1126 | + }, |
| 1127 | + "detected":{ |
| 1128 | + "shape":"Boolean", |
| 1129 | + "documentation":"<p>Indicates whether content that breaches the guardrail configuration is detected.</p>" |
1114 | 1130 | }
|
1115 | 1131 | },
|
1116 | 1132 | "documentation":"<p>The content filter for a guardrail.</p>"
|
|
1150 | 1166 | },
|
1151 | 1167 | "GuardrailContentPolicyAction":{
|
1152 | 1168 | "type":"string",
|
1153 |
| - "enum":["BLOCKED"] |
| 1169 | + "enum":[ |
| 1170 | + "BLOCKED", |
| 1171 | + "NONE" |
| 1172 | + ] |
1154 | 1173 | },
|
1155 | 1174 | "GuardrailContentPolicyAssessment":{
|
1156 | 1175 | "type":"structure",
|
|
1214 | 1233 | "action":{
|
1215 | 1234 | "shape":"GuardrailContextualGroundingPolicyAction",
|
1216 | 1235 | "documentation":"<p>The action performed by the guardrail's contextual grounding filter.</p>"
|
| 1236 | + }, |
| 1237 | + "detected":{ |
| 1238 | + "shape":"Boolean", |
| 1239 | + "documentation":"<p>Indicates whether content that fails the contextual grounding evaluation (grounding or relevance score less than the corresponding threshold) was detected.</p>" |
1217 | 1240 | }
|
1218 | 1241 | },
|
1219 | 1242 | "documentation":"<p>The details for the guardrail's contextual grounding filter.</p>"
|
|
1374 | 1397 | "action":{
|
1375 | 1398 | "shape":"GuardrailWordPolicyAction",
|
1376 | 1399 | "documentation":"<p>The action for the custom word.</p>"
|
| 1400 | + }, |
| 1401 | + "detected":{ |
| 1402 | + "shape":"Boolean", |
| 1403 | + "documentation":"<p>Indicates whether custom word content that breaches the guardrail configuration is detected.</p>" |
1377 | 1404 | }
|
1378 | 1405 | },
|
1379 | 1406 | "documentation":"<p>A custom word configured in a guardrail.</p>"
|
|
1481 | 1508 | "action":{
|
1482 | 1509 | "shape":"GuardrailWordPolicyAction",
|
1483 | 1510 | "documentation":"<p>The action for the managed word.</p>"
|
| 1511 | + }, |
| 1512 | + "detected":{ |
| 1513 | + "shape":"Boolean", |
| 1514 | + "documentation":"<p>Indicates whether managed word content that breaches the guardrail configuration is detected.</p>" |
1484 | 1515 | }
|
1485 | 1516 | },
|
1486 | 1517 | "documentation":"<p>A managed word configured in a guardrail.</p>"
|
|
1507 | 1538 | "type":"list",
|
1508 | 1539 | "member":{"shape":"GuardrailOutputContent"}
|
1509 | 1540 | },
|
| 1541 | + "GuardrailOutputScope":{ |
| 1542 | + "type":"string", |
| 1543 | + "enum":[ |
| 1544 | + "INTERVENTIONS", |
| 1545 | + "FULL" |
| 1546 | + ] |
| 1547 | + }, |
1510 | 1548 | "GuardrailOutputText":{"type":"string"},
|
1511 | 1549 | "GuardrailPiiEntityFilter":{
|
1512 | 1550 | "type":"structure",
|
|
1527 | 1565 | "action":{
|
1528 | 1566 | "shape":"GuardrailSensitiveInformationPolicyAction",
|
1529 | 1567 | "documentation":"<p>The PII entity filter action.</p>"
|
| 1568 | + }, |
| 1569 | + "detected":{ |
| 1570 | + "shape":"Boolean", |
| 1571 | + "documentation":"<p>Indicates whether personally identifiable information (PII) that breaches the guardrail configuration is detected.</p>" |
1530 | 1572 | }
|
1531 | 1573 | },
|
1532 | 1574 | "documentation":"<p>A Personally Identifiable Information (PII) entity configured in a guardrail.</p>"
|
|
1594 | 1636 | "action":{
|
1595 | 1637 | "shape":"GuardrailSensitiveInformationPolicyAction",
|
1596 | 1638 | "documentation":"<p>The regex filter action.</p>"
|
| 1639 | + }, |
| 1640 | + "detected":{ |
| 1641 | + "shape":"Boolean", |
| 1642 | + "documentation":"<p>Indicates whether custom regex entities that breach the guardrail configuration are detected.</p>" |
1597 | 1643 | }
|
1598 | 1644 | },
|
1599 | 1645 | "documentation":"<p>A Regex filter configured in a guardrail.</p>"
|
|
1606 | 1652 | "type":"string",
|
1607 | 1653 | "enum":[
|
1608 | 1654 | "ANONYMIZED",
|
1609 |
| - "BLOCKED" |
| 1655 | + "BLOCKED", |
| 1656 | + "NONE" |
1610 | 1657 | ]
|
1611 | 1658 | },
|
1612 | 1659 | "GuardrailSensitiveInformationPolicyAssessment":{
|
|
1716 | 1763 | "action":{
|
1717 | 1764 | "shape":"GuardrailTopicPolicyAction",
|
1718 | 1765 | "documentation":"<p>The action the guardrail should take when it intervenes on a topic.</p>"
|
| 1766 | + }, |
| 1767 | + "detected":{ |
| 1768 | + "shape":"Boolean", |
| 1769 | + "documentation":"<p>Indicates whether topic content that breaches the guardrail configuration is detected.</p>" |
1719 | 1770 | }
|
1720 | 1771 | },
|
1721 | 1772 | "documentation":"<p>Information about a topic guardrail.</p>"
|
|
1726 | 1777 | },
|
1727 | 1778 | "GuardrailTopicPolicyAction":{
|
1728 | 1779 | "type":"string",
|
1729 |
| - "enum":["BLOCKED"] |
| 1780 | + "enum":[ |
| 1781 | + "BLOCKED", |
| 1782 | + "NONE" |
| 1783 | + ] |
1730 | 1784 | },
|
1731 | 1785 | "GuardrailTopicPolicyAssessment":{
|
1732 | 1786 | "type":"structure",
|
|
1751 | 1805 | "type":"string",
|
1752 | 1806 | "enum":[
|
1753 | 1807 | "enabled",
|
1754 |
| - "disabled" |
| 1808 | + "disabled", |
| 1809 | + "enabled_full" |
1755 | 1810 | ]
|
1756 | 1811 | },
|
1757 | 1812 | "GuardrailTraceAssessment":{
|
|
1768 | 1823 | "outputAssessments":{
|
1769 | 1824 | "shape":"GuardrailAssessmentListMap",
|
1770 | 1825 | "documentation":"<p>the output assessments.</p>"
|
| 1826 | + }, |
| 1827 | + "actionReason":{ |
| 1828 | + "shape":"String", |
| 1829 | + "documentation":"<p>Provides the reason for the action taken when harmful content is detected.</p>" |
1771 | 1830 | }
|
1772 | 1831 | },
|
1773 | 1832 | "documentation":"<p>A top-level guardrail trace object. For more information, see <a>ConverseTrace</a>.</p>"
|
|
1820 | 1879 | },
|
1821 | 1880 | "GuardrailWordPolicyAction":{
|
1822 | 1881 | "type":"string",
|
1823 |
| - "enum":["BLOCKED"] |
| 1882 | + "enum":[ |
| 1883 | + "BLOCKED", |
| 1884 | + "NONE" |
| 1885 | + ] |
1824 | 1886 | },
|
1825 | 1887 | "GuardrailWordPolicyAssessment":{
|
1826 | 1888 | "type":"structure",
|
|
3004 | 3066 | "type":"string",
|
3005 | 3067 | "enum":[
|
3006 | 3068 | "ENABLED",
|
3007 |
| - "DISABLED" |
| 3069 | + "DISABLED", |
| 3070 | + "ENABLED_FULL" |
3008 | 3071 | ]
|
3009 | 3072 | },
|
3010 | 3073 | "ValidationException":{
|
|
0 commit comments