@@ -4,6 +4,7 @@ import com.cjcrafter.openai.gson.ChatChoiceChunkAdapter
4
4
import com.cjcrafter.openai.chat.*
5
5
import com.cjcrafter.openai.completions.CompletionRequest
6
6
import com.cjcrafter.openai.completions.CompletionResponse
7
+ import com.cjcrafter.openai.completions.CompletionResponseChunk
7
8
import com.cjcrafter.openai.exception.OpenAIError
8
9
import com.cjcrafter.openai.exception.WrappedIOError
9
10
import com.cjcrafter.openai.gson.ChatUserAdapter
@@ -16,6 +17,7 @@ import okhttp3.*
16
17
import okhttp3.MediaType.Companion.toMediaType
17
18
import okhttp3.RequestBody.Companion.toRequestBody
18
19
import java.io.IOException
20
+ import java.lang.IllegalStateException
19
21
import java.util.function.Consumer
20
22
21
23
/* *
@@ -56,22 +58,27 @@ class OpenAI @JvmOverloads constructor(
56
58
.post(body).build()
57
59
}
58
60
61
+ /* *
62
+ * Create completion
63
+ *
64
+ * @param request
65
+ * @return
66
+ * @since 1.3.0
67
+ */
59
68
@Throws(OpenAIError ::class )
60
69
fun createCompletion (request : CompletionRequest ): CompletionResponse {
61
70
@Suppress(" DEPRECATION" )
62
71
request.stream = false // use streamCompletion for stream=true
63
72
val httpRequest = buildRequest(request, " completions" )
64
73
65
- // Save the JsonObject to check for errors
66
- var rootObject: JsonObject ?
67
74
try {
68
75
client.newCall(httpRequest).execute().use { response ->
69
76
70
77
// Servers respond to API calls with json blocks. Since raw JSON isn't
71
78
// very developer friendly, we wrap for easy data access.
72
- rootObject = JsonParser .parseString(response.body!! .string()).asJsonObject
73
- if (rootObject!! .has(" error" ))
74
- throw OpenAIError .fromJson(rootObject!! .get(" error" ).asJsonObject)
79
+ val rootObject = JsonParser .parseString(response.body!! .string()).asJsonObject
80
+ if (rootObject.has(" error" ))
81
+ throw OpenAIError .fromJson(rootObject.get(" error" ).asJsonObject)
75
82
76
83
return gson.fromJson(rootObject, CompletionResponse ::class .java)
77
84
}
@@ -81,6 +88,78 @@ class OpenAI @JvmOverloads constructor(
81
88
}
82
89
}
83
90
91
/**
 * Kotlin-friendly helper for [streamCompletion]: accepts a lambda with
 * [CompletionResponseChunk] as its receiver instead of a [Consumer], so
 * call sites can read `streamCompletionKotlin(request) { print(choices[0].text) }`.
 *
 * @param request The input information for ChatGPT.
 * @param onResponse The block to run for each streamed chunk.
 * @since 1.3.0
 */
fun streamCompletionKotlin(request: CompletionRequest, onResponse: CompletionResponseChunk.() -> Unit) =
    streamCompletion(request, { chunk -> chunk.onResponse() })
101
+
102
/**
 * Streams a completion from OpenAI, invoking [onResponse] once for every
 * generated chunk as it arrives.
 *
 * This method does not block the thread. Method calls to [onResponse] are
 * not handled by the main thread. It is crucial to consider thread safety
 * within the context of your program.
 *
 * @param request The input information for ChatGPT.
 * @param onResponse The method to call for each chunk.
 * @param onFailure The method to call if the HTTP fails. This method will
 *                  not be called if OpenAI returns an error.
 * @see createCompletion
 * @see streamCompletionKotlin
 * @since 1.3.0
 */
@JvmOverloads
fun streamCompletion(
    request: CompletionRequest,
    onResponse: Consumer<CompletionResponseChunk>, // use Consumer instead of Kotlin for better Java syntax
    onFailure: Consumer<OpenAIError> = Consumer { it.printStackTrace() }
) {
    @Suppress("DEPRECATION")
    request.stream = true // use createCompletion for stream=false
    val httpRequest = buildRequest(request, "completions")

    client.newCall(httpRequest).enqueue(object : Callback {

        override fun onFailure(call: Call, e: IOException) {
            onFailure.accept(WrappedIOError(e))
        }

        override fun onResponse(call: Call, response: Response) {
            response.body?.source()?.use { source ->
                while (!source.exhausted()) {

                    // The stream is server-sent events: each payload line is
                    // prefixed with "data: ", and events are separated by
                    // blank lines, which we skip.
                    var jsonResponse = source.readUtf8Line() ?: continue
                    if (jsonResponse.isEmpty())
                        continue

                    // Lines without the "data: " prefix are not payload
                    // (e.g. SSE comments or unexpected output); log them to
                    // stderr and move on rather than failing the stream.
                    if (!jsonResponse.startsWith("data: ")) {
                        System.err.println(jsonResponse)
                        continue
                    }

                    jsonResponse = jsonResponse.substring("data: ".length)
                    if (jsonResponse == "[DONE]")
                        continue

                    val rootObject = JsonParser.parseString(jsonResponse).asJsonObject
                    // NOTE(review): this throw occurs on OkHttp's dispatcher
                    // thread, not the caller's thread — the caller cannot
                    // catch it. Consider surfacing API errors via a callback.
                    if (rootObject.has("error"))
                        throw OpenAIError.fromJson(rootObject.get("error").asJsonObject)

                    val cache = gson.fromJson(rootObject, CompletionResponseChunk::class.java)
                    onResponse.accept(cache)
                }
            }
        }
    })
}
162
+
84
163
/* *
85
164
* Blocks the current thread until OpenAI responds to https request. The
86
165
* returned value includes information including tokens, generated text,
@@ -97,16 +176,14 @@ class OpenAI @JvmOverloads constructor(
97
176
request.stream = false // use streamResponse for stream=true
98
177
val httpRequest = buildRequest(request, " chat/completions" )
99
178
100
- // Save the JsonObject to check for errors
101
- var rootObject: JsonObject ?
102
179
try {
103
180
client.newCall(httpRequest).execute().use { response ->
104
181
105
182
// Servers respond to API calls with json blocks. Since raw JSON isn't
106
183
// very developer friendly, we wrap for easy data access.
107
- rootObject = JsonParser .parseString(response.body!! .string()).asJsonObject
108
- if (rootObject!! .has(" error" ))
109
- throw OpenAIError .fromJson(rootObject!! .get(" error" ).asJsonObject)
184
+ val rootObject = JsonParser .parseString(response.body!! .string()).asJsonObject
185
+ if (rootObject.has(" error" ))
186
+ throw OpenAIError .fromJson(rootObject.get(" error" ).asJsonObject)
110
187
111
188
return gson.fromJson(rootObject, ChatResponse ::class .java)
112
189
}
@@ -176,7 +253,7 @@ class OpenAI @JvmOverloads constructor(
176
253
fun streamChatCompletion (
177
254
request : ChatRequest ,
178
255
onResponse : Consumer <ChatResponseChunk >, // use Consumer instead of Kotlin for better Java syntax
179
- onFailure : Consumer <IOException > = Consumer { it.printStackTrace() }
256
+ onFailure : Consumer <WrappedIOError > = Consumer { it.printStackTrace() }
180
257
) {
181
258
@Suppress(" DEPRECATION" )
182
259
request.stream = true // use requestResponse for stream=false
@@ -186,7 +263,7 @@ class OpenAI @JvmOverloads constructor(
186
263
var cache: ChatResponseChunk ? = null
187
264
188
265
override fun onFailure (call : Call , e : IOException ) {
189
- onFailure.accept(e )
266
+ onFailure.accept(WrappedIOError (e) )
190
267
}
191
268
192
269
override fun onResponse (call : Call , response : Response ) {
@@ -203,6 +280,9 @@ class OpenAI @JvmOverloads constructor(
203
280
continue
204
281
205
282
val rootObject = JsonParser .parseString(jsonResponse).asJsonObject
283
+ if (rootObject.has(" error" ))
284
+ throw OpenAIError .fromJson(rootObject.get(" error" ).asJsonObject)
285
+
206
286
if (cache == null )
207
287
cache = gson.fromJson(rootObject, ChatResponseChunk ::class .java)
208
288
else
0 commit comments