llmstack/fixtures/initial_data.json
46 additions & 10 deletions
@@ -91,7 +91,9 @@
 "api_endpoint": "completions",
 "params": {
   "type": "object",
-  "required": ["model"],
+  "required": [
+    "model"
+  ],
   "properties": {
     "n": {
       "type": "integer",
@@ -178,7 +180,11 @@
       "description": "The number of images to generate. Must be between 1 and 10."
     },
     "size": {
-      "enum": ["256x256", "512x512", "1024x1024"],
+      "enum": [
+        "256x256",
+        "512x512",
+        "1024x1024"
+      ],
       "type": "string",
       "default": "1024x1024",
       "description": "The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024."
@@ -188,7 +194,10 @@
       "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse."
     },
     "response_format": {
-      "enum": ["url", "b64_json"],
+      "enum": [
+        "url",
+        "b64_json"
+      ],
       "type": "string",
       "default": "url",
       "description": "The format in which the generated images are returned. Must be one of url or b64_json."
@@ -211,7 +220,10 @@
 "api_endpoint": "text2image",
 "params": {
   "type": "object",
-  "required": ["engine", "seed"],
+  "required": [
+    "engine",
+    "seed"
+  ],
   "properties": {
     "seed": {
       "type": "integer",
@@ -296,7 +308,9 @@
 "api_endpoint": "generate",
 "params": {
   "type": "object",
-  "required": ["model"],
+  "required": [
+    "model"
+  ],
   "properties": {
     "k": {
       "type": "integer",
@@ -321,7 +335,11 @@
       "description": "The ID of a custom playground preset. You can create presets in the playground. If you use a preset, the prompt parameter becomes optional, and any included parameters will override the preset's parameters."
     },
     "truncate": {
-      "enum": ["NONE", "START", "END"],
+      "enum": [
+        "NONE",
+        "START",
+        "END"
+      ],
       "type": "string",
       "default": "END",
       "description": "Passing START will discard the start of the input. END will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.If NONE is selected, when the input exceeds the maximum input token length an error will be returned."
@@ -364,7 +382,11 @@
       "description": "Can be used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation."
     },
     "return_likelihoods": {
-      "enum": ["GENERATION", "ALL", "NONE"],
+      "enum": [
+        "GENERATION",
+        "ALL",
+        "NONE"
+      ],
       "type": "string",
       "default": "NONE",
       "description": "If GENERATION is selected, the token likelihoods will only be provided for generated text.If ALL is selected, the token likelihoods will be provided both for the prompt and the generated text."
@@ -387,7 +409,9 @@
 "api_endpoint": "chat/completions",
 "params": {
   "type": "object",
-  "required": ["model"],
+  "required": [
+    "model"
+  ],
   "properties": {
     "n": {
       "type": "integer",
@@ -399,7 +423,10 @@
       "description": "Up to 4 sequences where the API will stop generating further tokens."
     },
     "model": {
-      "enum": ["gpt-3.5-turbo", "gpt-3.5-turbo-0301"],
+      "enum": [
+        "gpt-3.5-turbo",
+        "gpt-3.5-turbo-0301"
+      ],
       "type": "string",
       "default": "gpt-3.5-turbo",
       "description": "ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported."