@@ -35,6 +35,22 @@ import java.util.function.Consumer
35
35
* 4. Obtain your API key: After subscribing to a plan, you will be redirected
36
36
* to the API dashboard, where you can find your unique API key. Copy and store it securely.
37
37
*
38
+ * All API methods in this class have a non-blocking option which enqueues
39
+ * the HTTPS request on a different thread. These method names have `Async`
40
+ * appended to the end of their names.
41
+ *
42
+ * Completions API:
43
+ * * [createCompletion]
44
+ * * [streamCompletion]
45
+ * * [createCompletionAsync]
46
+ * * [streamCompletionAsync]
47
+ *
48
+ * Chat API:
49
+ * * [createChatCompletion]
50
+ * * [streamChatCompletion]
51
+ * * [createChatCompletionAsync]
52
+ * * [streamChatCompletionAsync]
53
+ *
38
54
* @property apiKey Your OpenAI API key. It starts with `"sk-"` (without the quotes).
39
55
* @property organization If you belong to multiple organizations, specify which one to use (else `null`).
40
56
* @property client Controls proxies, timeouts, etc.
@@ -60,9 +76,20 @@ class OpenAI @JvmOverloads constructor(
60
76
}
61
77
62
78
/* *
79
+ * Predicts which text comes after the prompt, thus "completing" the text.
80
+ *
81
+ * Calls OpenAI's Completions API and waits until the entire completion is
82
+ * generated. When [CompletionRequest.maxTokens] is a big number, it will
83
+ * take a long time to generate all the tokens, so it is recommended to use
84
+ * [streamCompletionAsync] instead to allow users to see partial completions.
85
+ *
86
+ * This method blocks the current thread until the stream is complete. For
87
+ * non-blocking options, use [streamCompletionAsync]. It is important to
88
+ * consider which thread you are currently running on. Running this method
89
+ * on [javax.swing]'s thread, for example, will cause your UI to freeze
90
+ * temporarily.
63
91
*
64
- * @param request The input information for the Completions API.
65
- * @return The value returned by the Completions API.
92
+ * @param request The data to send to the API endpoint.
66
93
* @since 1.3.0
67
94
*/
68
95
@Throws(OpenAIError ::class )
@@ -85,11 +112,21 @@ class OpenAI @JvmOverloads constructor(
85
112
}
86
113
87
114
/* *
88
- * Create completion async
115
+ * Predicts which text comes after the prompt, thus "completing" the text.
116
+ *
117
+ * Calls OpenAI's Completions API and waits until the entire completion is
118
+ * generated. When [CompletionRequest.maxTokens] is a big number, it will
119
+ * take a long time to generate all the tokens, so it is recommended to use
120
+ * [streamCompletionAsync] instead to allow users to see partial completions.
89
121
*
90
- * @param request
91
- * @param onResponse
92
- * @param onFailure
122
+ * This method will not block the current thread. The code block [onResponse]
123
+ * will be run later on a different thread. Due to the different thread, it
124
+ * is important to consider thread safety in the context of your program. To
125
+ * avoid thread safety issues, use [createCompletion] to block the main thread.
126
+ *
127
+ * @param request The data to send to the API endpoint.
128
+ * @param onResponse The code to execute once the response is received.
129
+ * @param onFailure The code to execute when a failure occurs.
93
130
* @since 1.3.0
94
131
*/
95
132
@JvmOverloads
@@ -109,6 +146,8 @@ class OpenAI @JvmOverloads constructor(
109
146
}
110
147
111
148
/* *
149
+ * Predicts which text comes after the prompt, thus "completing" the text.
150
+ *
112
151
* Calls OpenAI's Completions API using a *stream* of data. Streams allow
113
152
* developers to access tokens in real time as they are generated. This is
114
153
* used to create the "scrolling text" or "living typing" effect. Using
@@ -149,11 +188,13 @@ class OpenAI @JvmOverloads constructor(
149
188
}
150
189
151
190
/* *
191
+ * Predicts which text comes after the prompt, thus "completing" the text.
192
+ *
152
193
* Calls OpenAI's Completions API using a *stream* of data. Streams allow
153
194
* developers to access tokens in real time as they are generated. This is
154
195
* used to create the "scrolling text" or "living typing" effect. Using
155
- * `streamCompletion ` gives users information immediately, as opposed to
156
- * `createCompletion ` where you have to wait for the entire message to
196
+ * `streamCompletionAsync ` gives users information immediately, as opposed to
197
+ * `createCompletionAsync ` where you have to wait for the entire message to
157
198
* generate.
158
199
*
159
200
* This method will not block the current thread. The code block [onResponse]
@@ -183,9 +224,22 @@ class OpenAI @JvmOverloads constructor(
183
224
}
184
225
185
226
/* *
227
+ * Responds to the input in a conversational manner. Chat can "remember"
228
+ * older parts of the conversation by looking at the different messages in
229
+ * the list.
186
230
*
187
- * @param request The input information for the Completions API.
188
- * @return The value returned by the Completions API.
231
+ * Calls OpenAI's Chat API and waits until the entire message is
232
+ * generated. Since generating an entire CHAT message can be time-consuming,
233
+ * it is preferred to use [streamChatCompletionAsync] instead.
234
+ *
235
+ * This method blocks the current thread until the stream is complete. For
236
+ * non-blocking options, use [createChatCompletionAsync]. It is important to
237
+ * consider which thread you are currently running on. Running this method
238
+ * on [javax.swing]'s thread, for example, will cause your UI to freeze
239
+ * temporarily.
240
+ *
241
+ * @param request The data to send to the API endpoint.
242
+ * @return The generated response.
189
243
* @since 1.3.0
190
244
*/
191
245
@Throws(OpenAIError ::class )
@@ -195,7 +249,7 @@ class OpenAI @JvmOverloads constructor(
195
249
val httpRequest = buildRequest(request, CHAT_ENDPOINT )
196
250
197
251
try {
198
- val httpResponse = client.newCall(httpRequest).execute();
252
+ val httpResponse = client.newCall(httpRequest).execute()
199
253
lateinit var response: ChatResponse
200
254
MyCallback (true , { throw it }) {
201
255
response = gson.fromJson(it, ChatResponse ::class .java)
@@ -208,11 +262,22 @@ class OpenAI @JvmOverloads constructor(
208
262
}
209
263
210
264
/* *
211
- * Create completion async
265
+ * Responds to the input in a conversational manner. Chat can "remember"
266
+ * older parts of the conversation by looking at the different messages in
267
+ * the list.
212
268
*
213
- * @param request
214
- * @param onResponse
215
- * @param onFailure
269
+ * Calls OpenAI's Chat API and waits until the entire message is
270
+ * generated. Since generating an entire CHAT message can be time-consuming,
271
+ * it is preferred to use [streamChatCompletionAsync] instead.
272
+ *
273
+ * This method will not block the current thread. The code block [onResponse]
274
+ * will be run later on a different thread. Due to the different thread, it
275
+ * is important to consider thread safety in the context of your program. To
276
+ * avoid thread safety issues, use [createChatCompletion] to block the main thread.
277
+ *
278
+ * @param request The data to send to the API endpoint.
279
+ * @param onResponse The code to execute once the response is received.
280
+ * @param onFailure The code to execute when a failure occurs.
216
281
* @since 1.3.0
217
282
*/
218
283
@JvmOverloads
@@ -232,6 +297,10 @@ class OpenAI @JvmOverloads constructor(
232
297
}
233
298
234
299
/* *
300
+ * Responds to the input in a conversational manner. Chat can "remember"
301
+ * older parts of the conversation by looking at the different messages in
302
+ * the list.
303
+ *
235
304
* Calls OpenAI's Completions API using a *stream* of data. Streams allow
236
305
* developers to access tokens in real time as they are generated. This is
237
306
* used to create the "scrolling text" or "living typing" effect. Using
@@ -277,17 +346,21 @@ class OpenAI @JvmOverloads constructor(
277
346
}
278
347
279
348
/* *
349
+ * Responds to the input in a conversational manner. Chat can "remember"
350
+ * older parts of the conversation by looking at the different messages in
351
+ * the list.
352
+ *
280
353
* Calls OpenAI's Completions API using a *stream* of data. Streams allow
281
354
* developers to access tokens in real time as they are generated. This is
282
- * used to create the "scrolling text" or "living typing" effect. Using
283
- * `streamCompletion ` gives users information immediately, as opposed to
284
- * `createCompletion` where you have to wait for the entire message to
355
+ * used to create the "scrolling text" or "live typing" effect. Using
356
+ * `streamChatCompletionAsync ` gives users information immediately, as opposed to
357
+ * [createChatCompletionAsync] where you have to wait for the entire message to
285
358
* generate.
286
359
*
287
360
* This method will not block the current thread. The code block [onResponse]
288
361
* will be run later on a different thread. Due to the different thread, it
289
362
* is important to consider thread safety in the context of your program. To
290
- * avoid thread safety issues, use [streamCompletion ] to block the main thread.
363
+ * avoid thread safety issues, use [streamChatCompletion ] to block the main thread.
291
364
*
292
365
* @param request The data to send to the API endpoint.
293
366
* @param onResponse The code to execute for every chunk of text.
0 commit comments