@@ -48,7 +48,8 @@ import kotlinx.coroutines.flow.map
48
48
import kotlinx.coroutines.tasks.await
49
49
50
50
/* *
51
- * A controller for communicating with the API of a given multimodal model (for example, Gemini).
51
+ * Represents a multimodal model (like Gemini), capable of generating content based on various input
52
+ * types.
52
53
*/
53
54
public class GenerativeModel
54
55
internal constructor (
@@ -122,11 +123,12 @@ internal constructor(
122
123
)
123
124
124
125
/* *
125
- * Generates a [GenerateContentResponse] from the backend with the provided [Content] .
126
+ * Generates new content from the input [Content] given to the model as a prompt.
126
127
*
127
- * @param prompt [Content] to send to the model.
128
- * @return A [GenerateContentResponse]. Function should be called within a suspend context to
129
- * properly manage concurrency.
128
+ * @param prompt The input(s) given to the model as a prompt.
129
+ * @return The content generated by the model.
130
+ * @throws [FirebaseVertexAIException] if the request failed.
131
+ * @see [FirebaseVertexAIException] for types of errors.
130
132
*/
131
133
public suspend fun generateContent (vararg prompt : Content ): GenerateContentResponse =
132
134
try {
@@ -136,10 +138,12 @@ internal constructor(
136
138
}
137
139
138
140
/* *
139
- * Generates a streaming response from the backend with the provided [Content].
141
+ * Generates new content as a stream from the input [Content] given to the model as a prompt.
140
142
*
141
- * @param prompt [Content] to send to the model.
142
- * @return A [Flow] which will emit responses as they are returned from the model.
143
+ * @param prompt The input(s) given to the model as a prompt.
144
+ * @return A [Flow] which will emit responses as they are returned by the model.
145
+ * @throws [FirebaseVertexAIException] if the request failed.
146
+ * @see [FirebaseVertexAIException] for types of errors.
143
147
*/
144
148
public fun generateContentStream (vararg prompt : Content ): Flow <GenerateContentResponse > =
145
149
controller
@@ -148,52 +152,60 @@ internal constructor(
148
152
.map { it.toPublic().validate() }
149
153
150
154
/* *
151
- * Generates a [GenerateContentResponse] from the backend with the provided text prompt.
155
+ * Generates new content from the text input given to the model as a prompt.
152
156
*
153
- * @param prompt The text to be converted into a single piece of [Content] to send to the model.
154
- * @return A [GenerateContentResponse] after some delay. Function should be called within a
155
- * suspend context to properly manage concurrency.
157
+ * @param prompt The text to be sent to the model as a prompt.
158
+ * @return The content generated by the model.
159
+ * @throws [FirebaseVertexAIException] if the request failed.
160
+ * @see [FirebaseVertexAIException] for types of errors.
156
161
*/
157
162
public suspend fun generateContent (prompt : String ): GenerateContentResponse =
158
163
generateContent(content { text(prompt) })
159
164
160
165
/* *
161
- * Generates a streaming response from the backend with the provided text prompt.
166
+ * Generates new content as a stream from the text input given to the model as a prompt.
162
167
*
163
- * @param prompt The text to be converted into a single piece of [Content] to send to the model.
164
- * @return A [Flow] which will emit responses as they are returned from the model.
168
+ * @param prompt The text to be sent to the model as a prompt.
169
+ * @return A [Flow] which will emit responses as they are returned by the model.
170
+ * @throws [FirebaseVertexAIException] if the request failed.
171
+ * @see [FirebaseVertexAIException] for types of errors.
165
172
*/
166
173
public fun generateContentStream (prompt : String ): Flow <GenerateContentResponse > =
167
174
generateContentStream(content { text(prompt) })
168
175
169
176
/* *
170
- * Generates a [GenerateContentResponse] from the backend with the provided image prompt.
177
+ * Generates new content from the image input given to the model as a prompt.
171
178
*
172
179
* @param prompt The image to be converted into a single piece of [Content] to send to the model.
173
- * @return A [GenerateContentResponse] after some delay. Function should be called within a
174
- * suspend context to properly manage concurrency.
180
+ * @return The content generated by the model.
181
+ * @throws [FirebaseVertexAIException] if the request failed.
182
+ * @see [FirebaseVertexAIException] for types of errors.
175
183
*/
176
184
public suspend fun generateContent (prompt : Bitmap ): GenerateContentResponse =
177
185
generateContent(content { image(prompt) })
178
186
179
187
/* *
180
- * Generates a streaming response from the backend with the provided image prompt.
188
+ * Generates new content as a stream from the image input given to the model as a prompt.
181
189
*
182
190
* @param prompt The image to be converted into a single piece of [Content] to send to the model.
183
- * @return A [Flow] which will emit responses as they are returned from the model.
191
+ * @return A [Flow] which will emit responses as they are returned by the model.
192
+ * @throws [FirebaseVertexAIException] if the request failed.
193
+ * @see [FirebaseVertexAIException] for types of errors.
184
194
*/
185
195
public fun generateContentStream (prompt : Bitmap ): Flow <GenerateContentResponse > =
186
196
generateContentStream(content { image(prompt) })
187
197
188
- /* * Creates a [Chat] instance which internally tracks the ongoing conversation with the model */
198
+ /* * Creates a [Chat] instance using this model with the optionally provided history. */
189
199
public fun startChat (history : List <Content > = emptyList()): Chat =
190
200
Chat (this , history.toMutableList())
191
201
192
202
/* *
193
- * Counts the amount of tokens in a prompt.
203
+ * Counts the number of tokens in a prompt using the model's tokenizer.
194
204
*
195
- * @param prompt A group of [Content] to count tokens of.
196
- * @return A [CountTokensResponse] containing the amount of tokens in the prompt.
205
+ * @param prompt The input(s) given to the model as a prompt.
206
+ * @return The [CountTokensResponse] of running the model's tokenizer on the input.
207
+ * @throws [FirebaseVertexAIException] if the request failed.
208
+ * @see [FirebaseVertexAIException] for types of errors.
197
209
*/
198
210
public suspend fun countTokens (vararg prompt : Content ): CountTokensResponse {
199
211
try {
@@ -204,20 +216,24 @@ internal constructor(
204
216
}
205
217
206
218
/* *
207
- * Counts the amount of tokens in the text prompt.
219
+ * Counts the number of tokens in a text prompt using the model's tokenizer.
208
220
*
209
- * @param prompt The text to be converted to a single piece of [Content] to count the tokens of.
210
- * @return A [CountTokensResponse] containing the amount of tokens in the prompt.
221
+ * @param prompt The text given to the model as a prompt.
222
+ * @return The [CountTokensResponse] of running the model's tokenizer on the input.
223
+ * @throws [FirebaseVertexAIException] if the request failed.
224
+ * @see [FirebaseVertexAIException] for types of errors.
211
225
*/
212
226
public suspend fun countTokens (prompt : String ): CountTokensResponse {
213
227
return countTokens(content { text(prompt) })
214
228
}
215
229
216
230
/* *
217
- * Counts the amount of tokens in the image prompt.
231
+ * Counts the number of tokens in an image prompt using the model's tokenizer.
218
232
*
219
- * @param prompt The image to be converted to a single piece of [Content] to count the tokens of.
220
- * @return A [CountTokensResponse] containing the amount of tokens in the prompt.
233
+ * @param prompt The image given to the model as a prompt.
234
+ * @return The [CountTokensResponse] of running the model's tokenizer on the input.
235
+ * @throws [FirebaseVertexAIException] if the request failed.
236
+ * @see [FirebaseVertexAIException] for types of errors.
221
237
*/
222
238
public suspend fun countTokens (prompt : Bitmap ): CountTokensResponse {
223
239
return countTokens(content { image(prompt) })
0 commit comments