docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html (32 additions, 10 deletions)
@@ -139,7 +139,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -275,7 +275,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -478,6 +478,8 @@ <h3>Method Details</h3>
           "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold.
         },
       },
+      "urlContext": { # Tool to support URL context. # Optional. Tool to support URL context retrieval.
+      },
     },
   ],
 }
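The added `urlContext` entry is an empty object that simply switches on URL context retrieval for a request. A rough sketch of passing it through the Python client; the service name and method chain follow this document, while the project, location, and model path are placeholder assumptions:

```python
from googleapiclient.discovery import build

# Sketch only: enable the new urlContext tool on a generateContent call.
# Assumes Application Default Credentials; project, location, and model
# below are placeholders, not part of the documented change.
service = build("firebaseml", "v2beta")
body = {
    "contents": [
        {"role": "user", "parts": [{"text": "Summarize https://example.com/article"}]}
    ],
    "tools": [
        {"urlContext": {}}  # empty object: just turns the tool on
    ],
}
response = (
    service.projects()
    .locations()
    .publishers()
    .models()
    .generateContent(
        model="projects/my-project/locations/us-central1/publishers/google/models/gemini-2.5-flash",
        body=body,
    )
    .execute()
)
```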
@@ -550,7 +552,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -692,7 +694,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -910,6 +912,8 @@ <h3>Method Details</h3>
           "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold.
         },
       },
+      "urlContext": { # Tool to support URL context. # Optional. Tool to support URL context retrieval.
+      },
     },
   ],
 }
@@ -978,7 +982,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -1014,7 +1018,7 @@ <h3>Method Details</h3>
       ],
       "groundingSupports": [ # Optional. List of grounding support.
         { # Grounding support.
-          "confidenceScores": [ # Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. This list must have the same size as the grounding_chunk_indices.
+          "confidenceScores": [ # Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored.
             3.14,
           ],
           "groundingChunkIndices": [ # A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.
"urlContextMetadata": { # Metadata related to url context retrieval tool. # Output only. Metadata related to url context retrieval tool.
1081
+
"urlMetadata": [ # Output only. List of url context.
1082
+
{ # Context of the a single url retrieval.
1083
+
"retrievedUrl": "A String", # Retrieved url by the tool.
1084
+
"urlRetrievalStatus": "A String", # Status of the url retrieval.
1085
+
},
1086
+
],
1087
+
},
1076
1088
},
1077
1089
],
1078
1090
"createTime": "A String", # Output only. Timestamp when the request is made to the server.
@@ -1176,7 +1188,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -1318,7 +1330,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -1536,6 +1548,8 @@ <h3>Method Details</h3>
           "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold.
         },
       },
+      "urlContext": { # Tool to support URL context. # Optional. Tool to support URL context retrieval.
+      },
     },
   ],
 }
@@ -1604,7 +1618,7 @@ <h3>Method Details</h3>
       "mimeType": "A String", # Required. The IANA standard MIME type of the source data.
     },
     "text": "A String", # Optional. Text part (can be code).
-    "thought": True or False, # Output only. Indicates if the part is thought from the model.
+    "thought": True or False, # Optional. Indicates if the part is thought from the model.
     "thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
     "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
       "endOffset": "A String", # Optional. The end offset of the video.
@@ -1640,7 +1654,7 @@ <h3>Method Details</h3>
       ],
       "groundingSupports": [ # Optional. List of grounding support.
         { # Grounding support.
-          "confidenceScores": [ # Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. This list must have the same size as the grounding_chunk_indices.
+          "confidenceScores": [ # Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored.
             3.14,
           ],
           "groundingChunkIndices": [ # A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.
"description": "Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. This list must have the same size as the grounding_chunk_indices.",
1475
+
"description": "Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored.",
1471
1476
"items": {
1472
1477
"format": "float",
1473
1478
"type": "number"
@@ -1610,8 +1615,7 @@
           "type": "string"
         },
         "thought": {
-          "description": "Output only. Indicates if the part is thought from the model.",
-          "readOnly": true,
+          "description": "Optional. Indicates if the part is thought from the model.",
           "type": "boolean"
         },
         "thoughtSignature": {
@@ -2222,6 +2226,10 @@ true
         "retrieval": {
           "$ref": "GoogleCloudAiplatformV1beta1Retrieval",
           "description": "Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation."
+        },
+        "urlContext": {
+          "$ref": "GoogleCloudAiplatformV1beta1UrlContext",
+          "description": "Optional. Tool to support URL context retrieval."
"description": "Output only. List of url context.",
2295
+
"items": {
2296
+
"$ref": "GoogleCloudAiplatformV1beta1UrlMetadata"
2297
+
},
2298
+
"readOnly": true,
2299
+
"type": "array"
2300
+
}
2301
+
},
2302
+
"type": "object"
2303
+
},
2304
+
"GoogleCloudAiplatformV1beta1UrlMetadata": {
2305
+
"description": "Context of the a single url retrieval.",
2306
+
"id": "GoogleCloudAiplatformV1beta1UrlMetadata",
2307
+
"properties": {
2308
+
"retrievedUrl": {
2309
+
"description": "Retrieved url by the tool.",
2310
+
"type": "string"
2311
+
},
2312
+
"urlRetrievalStatus": {
2313
+
"description": "Status of the url retrieval.",
2314
+
"enum": [
2315
+
"URL_RETRIEVAL_STATUS_UNSPECIFIED",
2316
+
"URL_RETRIEVAL_STATUS_SUCCESS",
2317
+
"URL_RETRIEVAL_STATUS_ERROR"
2318
+
],
2319
+
"enumDescriptions": [
2320
+
"Default value. This value is unused.",
2321
+
"Url retrieval is successful.",
2322
+
"Url retrieval is failed due to error."
2323
+
],
2324
+
"type": "string"
2325
+
}
2326
+
},
2327
+
"type": "object"
2328
+
},
2275
2329
"GoogleCloudAiplatformV1beta1VertexAISearch": {
2276
2330
"description": "Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder",