
Commit 7f574bd

better
1 parent c26b0bb commit 7f574bd


5 files changed: +20 -10 lines changed


packages/inference/src/snippets/templates/python/requests/conversationalStream.jinja

Lines changed: 4 additions & 2 deletions
@@ -7,9 +7,11 @@ def query(payload):
             return
         yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))
 
-for chunk in query({
+chunks = query({
     "model": "{{ providerModelId }}",
     {{ inputs.asJsonString }},
     "stream": True,
-}):
+})
+
+for chunk in chunks:
     print(chunk["choices"][0]["delta"]["content"], end="")
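
For context, a minimal sketch of what the rendered Python snippet looks like after this change, using the hf-inference LLM fixture's model as the example. The API_URL value, the headers/token placeholder, the guard clauses inside query(), and the example user message are assumptions for illustration; the yield line, the payload keys, and the chunks/for-loop structure come from the diff above and the fixtures below.

import json
import requests

# Assumed endpoint and token placeholder; the real values are rendered by the
# snippet templates and depend on the selected provider.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions"
headers = {"Authorization": "Bearer hf_***"}

def query(payload):
    # Stream the HTTP response and yield one parsed JSON object per SSE "data:" line.
    # The two guard clauses are assumed; the yield line matches the template.
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    for line in response.iter_lines():
        if not line.startswith(b"data:"):
            continue
        if line.strip() == b"data: [DONE]":
            return
        yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))

# The generator is now assigned to a variable first...
chunks = query({
    "model": "meta-llama/Llama-3.1-8B-Instruct",
    "messages": [
        {"role": "user", "content": "Hello!"}  # illustrative message, not from the diff
    ],
    "max_tokens": 500,
    "stream": True,
})

# ...and consumed in a separate loop, instead of iterating over query(...) inline.
for chunk in chunks:
    print(chunk["choices"][0]["delta"]["content"], end="")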

packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.requests.hf-inference.py

Lines changed: 4 additions & 2 deletions
@@ -13,7 +13,7 @@ def query(payload):
             return
         yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))
 
-for chunk in query({
+chunks = query({
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "messages": [
         {
@@ -23,5 +23,7 @@ def query(payload):
     ],
     "max_tokens": 500,
     "stream": True,
-}):
+})
+
+for chunk in chunks:
     print(chunk["choices"][0]["delta"]["content"], end="")

packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.requests.together.py

Lines changed: 4 additions & 2 deletions
@@ -13,7 +13,7 @@ def query(payload):
             return
         yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))
 
-for chunk in query({
+chunks = query({
     "model": "<together alias for meta-llama/Llama-3.1-8B-Instruct>",
     "messages": [
         {
@@ -23,5 +23,7 @@ def query(payload):
     ],
     "max_tokens": 500,
     "stream": True,
-}):
+})
+
+for chunk in chunks:
     print(chunk["choices"][0]["delta"]["content"], end="")

packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.requests.fireworks-ai.py

Lines changed: 4 additions & 2 deletions
@@ -13,7 +13,7 @@ def query(payload):
             return
         yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))
 
-for chunk in query({
+chunks = query({
     "model": "<fireworks-ai alias for meta-llama/Llama-3.2-11B-Vision-Instruct>",
     "messages": [
         {
@@ -34,5 +34,7 @@ def query(payload):
     ],
     "max_tokens": 500,
     "stream": True,
-}):
+})
+
+for chunk in chunks:
     print(chunk["choices"][0]["delta"]["content"], end="")

packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.requests.hf-inference.py

Lines changed: 4 additions & 2 deletions
@@ -13,7 +13,7 @@ def query(payload):
             return
         yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))
 
-for chunk in query({
+chunks = query({
     "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
     "messages": [
         {
@@ -34,5 +34,7 @@ def query(payload):
     ],
     "max_tokens": 500,
     "stream": True,
-}):
+})
+
+for chunk in chunks:
     print(chunk["choices"][0]["delta"]["content"], end="")

0 commit comments
