Skip to content

Commit 570ff04

Browse files
authored
Fix browser init (#797)
Update the prompt chat is waiting for, which was modified by #476. Modify logging defaults to not create a file in a temp folder without prompting the user, but rather just print an info message. Replace a few `print` calls with `logging.info`. This way, information about the bandwidth achieved will be printed to the console, but not to the web-browser chat window. Test plan: ``` % python3 torchchat.py browser stories110M & % curl -L http://127.0.0.1:5000 % curl -d "prompt=Once upon a time" -X POST http://127.0.0.1:5000/chat ``` TODOs: - Add CI that repeats the above steps - Figure out if spawning the generator from the browser can be avoided Fixes #785
1 parent 6a1400e commit 570ff04

File tree

3 files changed

+7
-8
lines changed

3 files changed

+7
-8
lines changed

chat_in_browser.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ def main():
3535
except:
3636
continue
3737

38-
if decoded.startswith("System Prompt") and decoded.endswith(": "):
38+
if decoded.endswith("Do you want to enter a system prompt? Enter y for yes and anything else for no. \n"):
3939
print(f"| {decoded}")
4040
proc.stdin.write("\n".encode("utf-8"))
4141
proc.stdin.flush()
@@ -93,6 +93,8 @@ def chat():
9393
model_prefix = "Model: "
9494
if output.startswith(model_prefix):
9595
output = output[len(model_prefix) :]
96+
else:
97+
print("But output is", output)
9698

9799
global convo
98100

cli.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,7 @@
1515
from build.utils import allowable_dtype_names, allowable_params_table, get_device_str
1616
from download import download_and_convert, is_model_downloaded
1717

18-
FORMAT = (
19-
"%(levelname)s: %(asctime)-15s: %(filename)s: %(funcName)s: %(module)s: %(message)s"
20-
)
21-
logging.basicConfig(filename="/tmp/torchchat.log", level=logging.INFO, format=FORMAT)
18+
logging.basicConfig(level=logging.INFO,format="%(message)s")
2219
logger = logging.getLogger(__name__)
2320

2421
default_device = os.getenv("TORCHCHAT_DEVICE", "fast")

generate.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -752,12 +752,12 @@ def callback(x):
752752
# Don't continue here.... because we need to report and reset
753753
# continue
754754

755-
print(
755+
logging.info(
756756
f"Time for inference {i + 1}: {t:.02f} sec total, {tokens_sec:.02f} tokens/sec"
757757
)
758-
print(f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s")
758+
logging.info(f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s")
759759
if i == 0:
760-
print(
760+
logging.info(
761761
f"*** This first iteration will include cold start effects for dynamic import, hardware caches{', JIT compilation' if jit_compile else ''}. ***"
762762
)
763763
if start_pos >= max_seq_length:

0 commit comments

Comments
 (0)