Skip to content

Commit 980b180

Browse files
rlazodaymxn
and authored
General improvement to VertexAI kdocs (#6370)
Several improvements to the kdocs of the public API of Vertex AI in Firebase. --------- Co-authored-by: Daymon <[email protected]>
1 parent 2d6c899 commit 980b180

File tree

12 files changed

+266
-132
lines changed

12 files changed

+266
-132
lines changed

firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt

Lines changed: 60 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -33,14 +33,16 @@ import kotlinx.coroutines.flow.onEach
3333
/**
3434
* Representation of a multi-turn interaction with a model.
3535
*
36-
* Handles the capturing and storage of the communication with the model, providing methods for
37-
* further interaction.
36+
* Captures and stores the history of communication in memory, and provides it as context with each
37+
* new message.
3838
*
3939
* **Note:** This object is not thread-safe, and calling [sendMessage] multiple times without
4040
* waiting for a response will throw an [InvalidStateException].
4141
*
42-
* @param model The model to use for the interaction
43-
* @property history The previous interactions with the model
42+
* @param model The model to use for the interaction.
43+
* @property history The previous content from the chat that has been successfully sent and received
44+
* from the model. This will be provided to the model for each message sent (as context for the
45+
* discussion).
4446
*/
4547
public class Chat(
4648
private val model: GenerativeModel,
@@ -49,11 +51,15 @@ public class Chat(
4951
private var lock = Semaphore(1)
5052

5153
/**
52-
* Generates a response from the backend with the provided [Content], and any previous ones
53-
* sent/returned from this chat.
54+
* Sends a message using the provided [prompt]; automatically providing the existing [history] as
55+
* context.
5456
*
55-
* @param prompt A [Content] to send to the model.
56-
* @throws InvalidStateException if the prompt is not coming from the 'user' role
57+
* If successful, the message and response will be added to the [history]. If unsuccessful,
58+
* [history] will remain unchanged.
59+
*
60+
* @param prompt The input that, together with the history, will be given to the model as the
61+
* prompt.
62+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role.
5763
* @throws InvalidStateException if the [Chat] instance has an active request.
5864
*/
5965
public suspend fun sendMessage(prompt: Content): GenerateContentResponse {
@@ -70,9 +76,15 @@ public class Chat(
7076
}
7177

7278
/**
73-
* Generates a response from the backend with the provided text prompt.
79+
* Sends a message using the provided [text prompt][prompt]; automatically providing the existing
80+
* [history] as context.
81+
*
82+
* If successful, the message and response will be added to the [history]. If unsuccessful,
83+
* [history] will remain unchanged.
7484
*
75-
* @param prompt The text to be converted into a single piece of [Content] to send to the model.
85+
* @param prompt The input that, together with the history, will be given to the model as the
86+
* prompt.
87+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role.
7688
* @throws InvalidStateException if the [Chat] instance has an active request.
7789
*/
7890
public suspend fun sendMessage(prompt: String): GenerateContentResponse {
@@ -81,9 +93,15 @@ public class Chat(
8193
}
8294

8395
/**
84-
* Generates a response from the backend with the provided image prompt.
96+
* Sends a message using the existing history of this chat as context and the provided image
97+
* prompt.
98+
*
99+
* If successful, the message and response will be added to the history. If unsuccessful, history
100+
* will remain unchanged.
85101
*
86-
* @param prompt The image to be converted into a single piece of [Content] to send to the model.
102+
* @param prompt The input that, together with the history, will be given to the model as the
103+
* prompt.
104+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role.
87105
* @throws InvalidStateException if the [Chat] instance has an active request.
88106
*/
89107
public suspend fun sendMessage(prompt: Bitmap): GenerateContentResponse {
@@ -92,11 +110,17 @@ public class Chat(
92110
}
93111

94112
/**
95-
* Generates a streaming response from the backend with the provided [Content].
113+
* Sends a message using the existing history of this chat as context and the provided [Content]
114+
* prompt.
115+
*
116+
* The response from the model is returned as a stream.
117+
*
118+
* If successful, the message and response will be added to the history. If unsuccessful, history
119+
* will remain unchanged.
96120
*
97-
* @param prompt A [Content] to send to the model.
98-
* @return A [Flow] which will emit responses as they are returned from the model.
99-
* @throws InvalidStateException if the prompt is not coming from the 'user' role
121+
* @param prompt The input that, together with the history, will be given to the model as the
122+
* prompt.
123+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role.
100124
* @throws InvalidStateException if the [Chat] instance has an active request.
101125
*/
102126
public fun sendMessageStream(prompt: Content): Flow<GenerateContentResponse> {
@@ -146,10 +170,17 @@ public class Chat(
146170
}
147171

148172
/**
149-
* Generates a streaming response from the backend with the provided text prompt.
173+
* Sends a message using the existing history of this chat as context and the provided text
174+
* prompt.
150175
*
151-
* @param prompt a text to be converted into a single piece of [Content] to send to the model
152-
* @return A [Flow] which will emit responses as they are returned from the model.
176+
* The response from the model is returned as a stream.
177+
*
178+
* If successful, the message and response will be added to the history. If unsuccessful, history
179+
* will remain unchanged.
180+
*
181+
* @param prompt The input(s) that, together with the history, will be given to the model as the
182+
* prompt.
183+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role.
153184
* @throws InvalidStateException if the [Chat] instance has an active request.
154185
*/
155186
public fun sendMessageStream(prompt: String): Flow<GenerateContentResponse> {
@@ -158,10 +189,17 @@ public class Chat(
158189
}
159190

160191
/**
161-
* Generates a streaming response from the backend with the provided image prompt.
192+
* Sends a message using the existing history of this chat as context and the provided image
193+
* prompt.
194+
*
195+
* The response from the model is returned as a stream.
196+
*
197+
* If successful, the message and response will be added to the history. If unsuccessful, history
198+
* will remain unchanged.
162199
*
163-
* @param prompt A [Content] to send to the model.
164-
* @return A [Flow] which will emit responses as they are returned from the model.
200+
* @param prompt The input that, together with the history, will be given to the model as the
201+
* prompt.
202+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role.
165203
* @throws InvalidStateException if the [Chat] instance has an active request.
166204
*/
167205
public fun sendMessageStream(prompt: Bitmap): Flow<GenerateContentResponse> {

firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -42,13 +42,15 @@ internal constructor(
4242
/**
4343
* Instantiates a new [GenerativeModel] given the provided parameters.
4444
*
45-
* @param modelName name of the model in the backend
46-
* @param generationConfig configuration parameters to use for content generation
47-
* @param safetySettings safety bounds to use during alongside prompts during content generation
48-
* @param requestOptions configuration options to utilize during backend communication
49-
* @param tools list of tools to make available to the model
50-
* @param toolConfig configuration that defines how the model handles the tools provided
51-
* @param systemInstruction contains a [Content] that directs the model to behave a certain way
45+
* @param modelName The name of the model to use, for example "gemini-1.5-pro".
46+
* @param generationConfig The configuration parameters to use for content generation.
47+
* @param safetySettings The safety bounds the model will abide to during content generation.
48+
* @param tools A list of [Tool]s the model may use to generate content.
49+
* @param toolConfig The [ToolConfig] that defines how the model handles the tools provided.
50+
* @param systemInstruction [Content] instructions that direct the model to behave a certain way.
51+
* Currently only text content is supported.
52+
* @param requestOptions Configuration options for sending requests to the backend.
53+
* @return The initialized [GenerativeModel] instance.
5254
*/
5355
@JvmOverloads
5456
public fun generativeModel(
@@ -86,10 +88,11 @@ internal constructor(
8688
@JvmStatic public fun getInstance(app: FirebaseApp): FirebaseVertexAI = getInstance(app)
8789

8890
/**
89-
* Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location]
91+
* Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location].
9092
*
9193
* @param location location identifier, defaults to `us-central1`; see available
9294
* [Vertex AI regions](https://firebase.google.com/docs/vertex-ai/locations?platform=android#available-locations)
95+
* .
9396
*/
9497
@JvmStatic
9598
@JvmOverloads

firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,11 @@ import com.google.firebase.appcheck.interop.InteropAppCheckTokenProvider
2222
import com.google.firebase.auth.internal.InternalAuthProvider
2323
import com.google.firebase.inject.Provider
2424

25-
/** Multi-resource container for Firebase Vertex AI */
25+
/**
26+
* Multi-resource container for Firebase Vertex AI.
27+
*
28+
* @hide
29+
*/
2630
internal class FirebaseVertexAIMultiResourceComponent(
2731
private val app: FirebaseApp,
2832
private val appCheckProvider: Provider<InteropAppCheckTokenProvider>,

firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt

Lines changed: 46 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,8 @@ import kotlinx.coroutines.flow.map
4848
import kotlinx.coroutines.tasks.await
4949

5050
/**
51-
* A controller for communicating with the API of a given multimodal model (for example, Gemini).
51+
* Represents a multimodal model (like Gemini), capable of generating content based on various input
52+
* types.
5253
*/
5354
public class GenerativeModel
5455
internal constructor(
@@ -122,11 +123,12 @@ internal constructor(
122123
)
123124

124125
/**
125-
* Generates a [GenerateContentResponse] from the backend with the provided [Content].
126+
* Generates new content from the input [Content] given to the model as a prompt.
126127
*
127-
* @param prompt [Content] to send to the model.
128-
* @return A [GenerateContentResponse]. Function should be called within a suspend context to
129-
* properly manage concurrency.
128+
* @param prompt The input(s) given to the model as a prompt.
129+
* @return The content generated by the model.
130+
* @throws [FirebaseVertexAIException] if the request failed.
131+
* @see [FirebaseVertexAIException] for types of errors.
130132
*/
131133
public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse =
132134
try {
@@ -136,10 +138,12 @@ internal constructor(
136138
}
137139

138140
/**
139-
* Generates a streaming response from the backend with the provided [Content].
141+
* Generates new content as a stream from the input [Content] given to the model as a prompt.
140142
*
141-
* @param prompt [Content] to send to the model.
142-
* @return A [Flow] which will emit responses as they are returned from the model.
143+
* @param prompt The input(s) given to the model as a prompt.
144+
* @return A [Flow] which will emit responses as they are returned by the model.
145+
* @throws [FirebaseVertexAIException] if the request failed.
146+
* @see [FirebaseVertexAIException] for types of errors.
143147
*/
144148
public fun generateContentStream(vararg prompt: Content): Flow<GenerateContentResponse> =
145149
controller
@@ -148,52 +152,60 @@ internal constructor(
148152
.map { it.toPublic().validate() }
149153

150154
/**
151-
* Generates a [GenerateContentResponse] from the backend with the provided text prompt.
155+
* Generates new content from the text input given to the model as a prompt.
152156
*
153-
* @param prompt The text to be converted into a single piece of [Content] to send to the model.
154-
* @return A [GenerateContentResponse] after some delay. Function should be called within a
155-
* suspend context to properly manage concurrency.
157+
* @param prompt The text to be sent to the model as a prompt.
158+
* @return The content generated by the model.
159+
* @throws [FirebaseVertexAIException] if the request failed.
160+
* @see [FirebaseVertexAIException] for types of errors.
156161
*/
157162
public suspend fun generateContent(prompt: String): GenerateContentResponse =
158163
generateContent(content { text(prompt) })
159164

160165
/**
161-
* Generates a streaming response from the backend with the provided text prompt.
166+
* Generates new content as a stream from the text input given to the model as a prompt.
162167
*
163-
* @param prompt The text to be converted into a single piece of [Content] to send to the model.
164-
* @return A [Flow] which will emit responses as they are returned from the model.
168+
* @param prompt The text to be sent to the model as a prompt.
169+
* @return A [Flow] which will emit responses as they are returned by the model.
170+
* @throws [FirebaseVertexAIException] if the request failed.
171+
* @see [FirebaseVertexAIException] for types of errors.
165172
*/
166173
public fun generateContentStream(prompt: String): Flow<GenerateContentResponse> =
167174
generateContentStream(content { text(prompt) })
168175

169176
/**
170-
* Generates a [GenerateContentResponse] from the backend with the provided image prompt.
177+
* Generates new content from the image input given to the model as a prompt.
171178
*
172179
* @param prompt The image to be converted into a single piece of [Content] to send to the model.
173-
* @return A [GenerateContentResponse] after some delay. Function should be called within a
174-
* suspend context to properly manage concurrency.
180+
* @return A [GenerateContentResponse] after some delay.
181+
* @throws [FirebaseVertexAIException] if the request failed.
182+
* @see [FirebaseVertexAIException] for types of errors.
175183
*/
176184
public suspend fun generateContent(prompt: Bitmap): GenerateContentResponse =
177185
generateContent(content { image(prompt) })
178186

179187
/**
180-
* Generates a streaming response from the backend with the provided image prompt.
188+
* Generates new content as a stream from the image input given to the model as a prompt.
181189
*
182190
* @param prompt The image to be converted into a single piece of [Content] to send to the model.
183-
* @return A [Flow] which will emit responses as they are returned from the model.
191+
* @return A [Flow] which will emit responses as they are returned by the model.
192+
* @throws [FirebaseVertexAIException] if the request failed.
193+
* @see [FirebaseVertexAIException] for types of errors.
184194
*/
185195
public fun generateContentStream(prompt: Bitmap): Flow<GenerateContentResponse> =
186196
generateContentStream(content { image(prompt) })
187197

188-
/** Creates a [Chat] instance which internally tracks the ongoing conversation with the model */
198+
/** Creates a [Chat] instance using this model with the optionally provided history. */
189199
public fun startChat(history: List<Content> = emptyList()): Chat =
190200
Chat(this, history.toMutableList())
191201

192202
/**
193-
* Counts the amount of tokens in a prompt.
203+
* Counts the number of tokens in a prompt using the model's tokenizer.
194204
*
195-
* @param prompt A group of [Content] to count tokens of.
196-
* @return A [CountTokensResponse] containing the amount of tokens in the prompt.
205+
* @param prompt The input(s) given to the model as a prompt.
206+
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
207+
* @throws [FirebaseVertexAIException] if the request failed.
208+
* @see [FirebaseVertexAIException] for types of errors.
197209
*/
198210
public suspend fun countTokens(vararg prompt: Content): CountTokensResponse {
199211
try {
@@ -204,20 +216,24 @@ internal constructor(
204216
}
205217

206218
/**
207-
* Counts the amount of tokens in the text prompt.
219+
* Counts the number of tokens in a text prompt using the model's tokenizer.
208220
*
209-
* @param prompt The text to be converted to a single piece of [Content] to count the tokens of.
210-
* @return A [CountTokensResponse] containing the amount of tokens in the prompt.
221+
* @param prompt The text given to the model as a prompt.
222+
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
223+
* @throws [FirebaseVertexAIException] if the request failed.
224+
* @see [FirebaseVertexAIException] for types of errors.
211225
*/
212226
public suspend fun countTokens(prompt: String): CountTokensResponse {
213227
return countTokens(content { text(prompt) })
214228
}
215229

216230
/**
217-
* Counts the amount of tokens in the image prompt.
231+
* Counts the number of tokens in an image prompt using the model's tokenizer.
218232
*
219-
* @param prompt The image to be converted to a single piece of [Content] to count the tokens of.
220-
* @return A [CountTokensResponse] containing the amount of tokens in the prompt.
233+
* @param prompt The image given to the model as a prompt.
234+
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
235+
* @throws [FirebaseVertexAIException] if the request failed.
236+
* @see [FirebaseVertexAIException] for types of errors.
221237
*/
222238
public suspend fun countTokens(prompt: Bitmap): CountTokensResponse {
223239
return countTokens(content { image(prompt) })

firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt

Lines changed: 23 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -25,28 +25,43 @@ import kotlinx.coroutines.reactive.asPublisher
2525
import org.reactivestreams.Publisher
2626

2727
/**
28-
* Helper method for interacting with a [Chat] from Java.
28+
* Wrapper class providing Java compatible methods for [Chat].
2929
*
30-
* @see from
30+
* @see [Chat]
3131
*/
3232
public abstract class ChatFutures internal constructor() {
3333

3434
/**
35-
* Generates a response from the backend with the provided [Content], and any previous ones
36-
* sent/returned from this chat.
35+
* Sends a message using the existing history of this chat as context and the provided [Content]
36+
* prompt.
3737
*
38-
* @param prompt A [Content] to send to the model.
38+
* If successful, the message and response will be added to the history. If unsuccessful, history
39+
* will remain unchanged.
40+
*
41+
* @param prompt The input(s) that, together with the history, will be given to the model as the
42+
* prompt.
43+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role
44+
* @throws InvalidStateException if the [Chat] instance has an active request
3945
*/
4046
public abstract fun sendMessage(prompt: Content): ListenableFuture<GenerateContentResponse>
4147

4248
/**
43-
* Generates a streaming response from the backend with the provided [Content].
49+
* Sends a message using the existing history of this chat as context and the provided [Content]
50+
* prompt.
51+
*
52+
* The response from the model is returned as a stream.
53+
*
54+
* If successful, the message and response will be added to the history. If unsuccessful, history
55+
* will remain unchanged.
4456
*
45-
* @param prompt A [Content] to send to the model.
57+
* @param prompt The input(s) that, together with the history, will be given to the model as the
58+
* prompt.
59+
* @throws InvalidStateException if [prompt] is not coming from the 'user' role
60+
* @throws InvalidStateException if the [Chat] instance has an active request
4661
*/
4762
public abstract fun sendMessageStream(prompt: Content): Publisher<GenerateContentResponse>
4863

49-
/** Returns the [Chat] instance that was used to create this instance */
64+
/** Returns the [Chat] object wrapped by this object. */
5065
public abstract fun getChat(): Chat
5166

5267
private class FuturesImpl(private val chat: Chat) : ChatFutures() {

0 commit comments

Comments
 (0)