
Commit e6d2e4c

refactor a bit
1 parent 68018d9 commit e6d2e4c

File tree: 1 file changed, +10 -8 lines changed

packages/tasks/src/snippets/python.ts

Lines changed: 10 additions & 8 deletions
@@ -33,12 +33,14 @@ export const snippetConversational = (
 	const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
 		{ role: "user", content: "What is the capital of France?" },
 	];
+	const messagesStr = formatGenerationMessages({ messages, sep: ",\n\t", start: `[\n\t`, end: `\n]` });
 
 	const config = {
 		temperature: opts?.temperature,
 		max_tokens: opts?.max_tokens ?? 500,
 		top_p: opts?.top_p,
 	};
+	const configStr = formatGenerationConfig({ config, sep: ",\n\t", start: "", end: "", connector: "=" });
 
 	if (streaming) {
 		return [
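For context, the two helper calls above are only hoisted into variables, not changed. Assuming formatGenerationMessages and formatGenerationConfig render Python-style literals (their implementations live elsewhere in this file and are not shown in this diff), the precomputed strings would come out roughly as:

// Illustrative values only; the exact rendering of the helpers is an assumption.
const messagesStr: string = `[
	{ "role": "user", "content": "What is the capital of France?" }
]`;
// temperature and top_p are undefined here, so presumably only max_tokens survives.
const configStr: string = `max_tokens=500`;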
@@ -48,12 +50,12 @@ export const snippetConversational = (
 
 client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
 
-messages = ${formatGenerationMessages({ messages, sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
+messages = ${messagesStr}
 
 stream = client.chat.completions.create(
 	model="${model.id}",
 	messages=messages,
-	${formatGenerationConfig({ config, sep: ",\n\t", start: "", end: "", connector: "=" })},
+	${configStr},
 	stream=True
 )
 
@@ -69,12 +71,12 @@ client = OpenAI(
 	api_key="${accessToken || "{API_TOKEN}"}"
 )
 
-messages = ${formatGenerationMessages({ messages, sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
+messages = ${messagesStr}
 
 stream = client.chat.completions.create(
 	model="${model.id}",
 	messages=messages,
-	${formatGenerationConfig({ config, sep: ",\n\t", start: "", end: "", connector: "=" })},
+	${configStr},
 	stream=True
 )
 
@@ -90,12 +92,12 @@ for chunk in stream:
 
 client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
 
-messages = ${formatGenerationMessages({ messages, sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
+messages = ${messagesStr}
 
 completion = client.chat.completions.create(
 	model="${model.id}",
 	messages=messages,
-	${formatGenerationConfig({ config, sep: ",\n\t", start: "", end: "", connector: "=" })}
+	${configStr}
 )
 
 print(completion.choices[0].message)`,
@@ -109,12 +111,12 @@ client = OpenAI(
 	api_key="${accessToken || "{API_TOKEN}"}"
 )
 
-messages = ${formatGenerationMessages({ messages, sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
+messages = ${messagesStr}
 
 completion = client.chat.completions.create(
 	model="${model.id}",
 	messages=messages,
-	${formatGenerationConfig({ config, sep: ",\n\t", start: "", end: "", connector: "=" })}
+	${configStr}
 )
 
 print(completion.choices[0].message)`,
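Taken together, the refactor computes the two formatted strings once and interpolates them into each of the four generated Python snippets. Below is a minimal, self-contained TypeScript sketch of that pattern; formatMessages and formatConfig are hypothetical stand-ins for the real formatGenerationMessages/formatGenerationConfig (which are not part of this diff), and "my-model" is a placeholder model id.

// Sketch of the "format once, interpolate everywhere" pattern from this commit.
// formatMessages/formatConfig are hypothetical stand-ins for the real helpers.

interface ChatMessage {
	role: string;
	content: string;
}

// Hypothetical stand-in: render messages as a Python-style list of dicts.
function formatMessages(opts: { messages: ChatMessage[]; sep: string; start: string; end: string }): string {
	const items = opts.messages.map((m) => `{ "role": "${m.role}", "content": "${m.content}" }`);
	return opts.start + items.join(opts.sep) + opts.end;
}

// Hypothetical stand-in: render defined config entries as `key=value` pairs.
function formatConfig(opts: {
	config: Record<string, number | undefined>;
	sep: string;
	start: string;
	end: string;
	connector: string;
}): string {
	const entries = Object.entries(opts.config)
		.filter(([, value]) => value !== undefined)
		.map(([key, value]) => `${key}${opts.connector}${value}`);
	return opts.start + entries.join(opts.sep) + opts.end;
}

const messages: ChatMessage[] = [{ role: "user", content: "What is the capital of France?" }];
const config = { temperature: undefined, max_tokens: 500, top_p: undefined };

// Computed once, reused by every snippet template that follows.
const messagesStr = formatMessages({ messages, sep: ",\n\t", start: `[\n\t`, end: `\n]` });
const configStr = formatConfig({ config, sep: ",\n\t", start: "", end: "", connector: "=" });

// One of the generated Python snippets, with both precomputed strings interpolated.
const snippet = `messages = ${messagesStr}

completion = client.chat.completions.create(
	model="my-model",
	messages=messages,
	${configStr}
)

print(completion.choices[0].message)`;

console.log(snippet);

Computing the strings up front avoids repeating the same two helper calls in all four template literals and keeps the snippets in sync if the formatting arguments ever change.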
