Skip to content

Commit b8f88fd

Browse files
committed
Use singleton logger instead of print in generate
1 parent 4b8cdcb commit b8f88fd

File tree

1 file changed

+7
-2
lines changed

1 file changed

+7
-2
lines changed

torchchat/distributed/generate.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@
 import torch.multiprocessing as mp
 from torchchat.cli.builder import BuilderArgs, TokenizerArgs
 from torchchat.distributed.dist_run import NAME_TO_DISTRIBUTION_AND_DTYPE
+from torchchat.distributed.logging_utils import SingletonLogger
+
+logger = SingletonLogger.get_logger()


 def _setup_env(world_size: int, rank: int, target: callable, *args, **kwargs):
def _setup_env(world_size: int, rank: int, target: callable, *args, **kwargs):
@@ -37,7 +40,7 @@ def _launch_distributed_inference(
     model_name: str, builder_args: BuilderArgs, tokenizer_args: TokenizerArgs
 ) -> tuple[List]:
     # create programmatic elastic launch
-    print("Launching distributed inference ...")
+    logger.info("Launching distributed inference ...")

     num_processes_per_node = builder_args.pp * builder_args.tp

@@ -59,7 +62,9 @@ def _launch_distributed_inference(
     for pipe in pipes:
         response = pipe.recv()

-    print(f"Done launching distributed inference on {num_processes_per_node} GPUs.")
+    logger.info(
+        f"Done launching distributed inference on {num_processes_per_node} GPUs."
+    )
     return procs, pipes

0 commit comments

Comments
 (0)