Skip to content

Commit e0bf673

Browse files
authored
refine warnings (#2641)
* refine warnings
* fix
* further improve
* further improve
* refine warnings
* refine warnings
* workaround emb warning for jit.script
1 parent c251356 commit e0bf673

39 files changed

+515
-268
lines changed

intel_extension_for_pytorch/__init__.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import re
33

44
import torch
5-
import warnings
5+
66

77
try:
88
import torchvision
@@ -128,12 +128,14 @@
128128
from . import _dynamo
129129
from . import _meta_registrations
130130
from ._init_on_device import OnDevice
131+
from .utils._logger import logger, WarningType
131132

132133
try:
133134
from .cpu import tpp
134135
except BaseException:
135-
warnings.warn(
136-
"Please install transformers repo when you want to use fast_bert API."
136+
logger.warn(
137+
"Please install transformers repo when you want to use fast_bert API.",
138+
_type=WarningType.MissingArgument,
137139
)
138140

139141
from .frontend import optimize

intel_extension_for_pytorch/_inductor/compiler.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,8 @@
22
from torch._subclasses import FakeTensor
33
from torch.utils._mode_utils import no_dispatch
44
import builtins
5-
import warnings
65
from typing import Callable, Dict, Optional, Union, List
7-
6+
from ..utils._logger import logger, WarningType
87

98
_compiler_backend = "inductor"
109

@@ -66,7 +65,10 @@ def defake(x):
6665
traced_model = torch.jit.freeze(traced_model)
6766
return traced_model
6867
except Exception:
69-
warnings.warn("JIT trace failed during the IPEX compile process.")
68+
logger.warning(
69+
"JIT trace failed during the IPEX compile process.",
70+
_type=WarningType.NotSupported,
71+
)
7072
return model
7173
else:
7274
raise RuntimeError(

intel_extension_for_pytorch/_inductor/decomposition.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,14 @@
1-
import logging
21
import torch._decomp as decomp
32

4-
log = logging.getLogger(__name__)
3+
from ..utils._logger import logger
4+
55
decomposition_overrides = {}
66

77

88
def register_decomposition(ops):
99
for op in [ops] if callable(ops) else ops:
1010
if op in decomposition_overrides:
11-
log.warning(f"duplicate decomp: {ops}")
11+
logger.warning(f"duplicate decomp: {ops}")
1212
return decomp.register_decomposition(ops, decomposition_overrides)
1313

1414

intel_extension_for_pytorch/cpu/auto_ipex.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,13 @@
11
import os
22
import platform
33
import glob
4-
import logging
4+
from ..utils._logger import logger, WarningType
55
import sys
66
from argparse import ArgumentParser, REMAINDER
77
from argparse import RawTextHelpFormatter
88
from tempfile import mkstemp
99
import uuid
1010

11-
format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
12-
logging.basicConfig(level=logging.INFO, format=format_str)
13-
logger = logging.getLogger(__name__)
14-
1511

1612
def apply_monkey_patch(program, dtype, auto_ipex_verbose, disable_ipex_graph_mode):
1713
# Auto apply the ipex features
@@ -213,7 +209,9 @@ def main():
213209
lst_valid.append(item)
214210
else:
215211
logger.warning(
216-
"{} doesn't exist. Removing it from LD_PRELOAD.".format(item)
212+
f"You have set {item} into LD_PRELOAD but it doesn't exist. Removing it from LD_PRELOAD."
213+
+ "please install it if you want it or remove it from LD_PRELOAD if you don't",
214+
_type=WarningType.MissingDependency,
217215
)
218216
if len(lst_valid) > 0:
219217
os.environ["LD_PRELOAD"] = ":".join(lst_valid)

intel_extension_for_pytorch/cpu/graph_capture.py

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,9 @@
77
from typing import List
88

99
import functools
10-
import logging
1110
import threading
1211
import warnings
12+
from ..utils._logger import logger, WarningType
1313

1414

1515
class RunMethods(IntEnum):
@@ -37,7 +37,10 @@ def compiler(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
3737
traced_model = torch.jit.freeze(traced_model)
3838
return traced_model
3939
except Exception:
40-
warnings.warn("JIT trace failed during the 'compiler' process.")
40+
logger.warning(
41+
"JIT trace failed during the 'compiler' process.",
42+
_type=WarningType.NotSupported,
43+
)
4144
return gm
4245

4346
@functools.wraps(func)
@@ -62,8 +65,9 @@ def forward(*input, **kwargs):
6265
else:
6366
return self.model(*input, **kwargs)
6467
if self.train:
65-
warnings.warn(
66-
"graph capture does not support training yet."
68+
logger.warning(
69+
"graph capture does not support training yet.",
70+
_type=WarningType.NotSupported,
6771
)
6872
self.method = RunMethods.EagerTrain
6973
return func(*input, **kwargs)
@@ -89,7 +93,7 @@ def forward(*input, **kwargs):
8993
output = traced_model(*input, **kwargs)
9094
self.model = traced_model
9195
self.method = RunMethods.JIT
92-
logging.debug("generate graph by JIT trace.")
96+
logger.debug("generate graph by JIT trace.")
9397
return output
9498
except BaseException:
9599
try:
@@ -101,11 +105,12 @@ def forward(*input, **kwargs):
101105
output = dynamo_model(*input, **kwargs)
102106
self.model = dynamo_model
103107
self.method = RunMethods.TorchDynamo
104-
logging.debug("generate graph by TorchDynamo.")
108+
logger.debug("generate graph by TorchDynamo.")
105109
return output
106110
except BaseException:
107-
warnings.warn(
108-
"Both JIT and TorchDynamo failed, fallback to original model."
111+
logger.warning(
112+
"Both JIT and TorchDynamo failed, fallback to original model.",
113+
_type=WarningType.NotSupported,
109114
)
110115
self.method = RunMethods.EagerInfer
111116
torch._dynamo.reset()

intel_extension_for_pytorch/cpu/hypertune/objective.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# reference: https://github.com/intel/neural-compressor/blob/\
22
# 15477100cef756e430c8ef8ef79729f0c80c8ce6/neural_compressor/objective.py
33
import subprocess
4+
from ...utils._logger import logger, WarningType
45

56

67
class MultiObjective(object):
@@ -39,7 +40,10 @@ def deprecate_config(self, cfg, deprecated, new, default):
3940
), f"Configurations {deprecated} and {new} cannot be set at the same time."
4041
ret = default
4142
if v_deprecated != default:
42-
print(f"[**Warning**] Configuration {deprecated} is deprecated by {new}.")
43+
logger.warn(
44+
f"[**Warning**] Configuration {deprecated} is deprecated by {new}.",
45+
_type=WarningType.DeprecatedArgument,
46+
)
4347
ret = v_deprecated
4448
if v_new != default:
4549
ret = v_new

intel_extension_for_pytorch/cpu/launch/cpu_info.py

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import platform
44
import re
55
import subprocess
6+
from ...utils._logger import WarningType
67

78
# lscpu Examples
89
# # The following is the parsable format, which can be fed to other
@@ -206,7 +207,7 @@ def __init__(self, logger=None, lscpu_txt=""):
206207
if c.maxmhz in e_core_mhzs:
207208
c.is_p_core = False
208209

209-
def verbose(self, level, msg):
210+
def verbose(self, level, msg, warning_type=None):
210211
if self.logger:
211212
logging_fn = {
212213
"warning": self.logger.warning,
@@ -215,7 +216,7 @@ def verbose(self, level, msg):
215216
assert (
216217
level in logging_fn.keys()
217218
), f"Unrecognized logging level {level} is detected. Available levels are {logging_fn.keys()}."
218-
logging_fn[level](msg)
219+
logging_fn[level](msg, _type=warning_type)
219220
else:
220221
print(msg)
221222

@@ -264,12 +265,18 @@ def gen_pools_ondemand(
264265
if use_logical_cores:
265266
self.verbose(
266267
"warning",
267-
"Argument --use-logical-cores won't take effect when --cores-list is set.",
268+
"Argument --use-logical-cores won't take effect when --cores-list is set."
269+
+ "please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide" # noqa: B950
270+
+ "for usage guide",
271+
warning_type=WarningType.AmbiguousArgument,
268272
)
269273
if use_e_cores:
270274
self.verbose(
271275
"warning",
272276
"Argument --use-e-cores won't take effect when --cores-list is set.",
277+
+"please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide" # noqa: B950
278+
+ "for usage guide",
279+
warning_type=WarningType.AmbiguousArgument,
273280
)
274281
pool = [c for c in self.pool_all if c.cpu in cores_list]
275282
nodes = list(set([c.node for c in pool]))
@@ -284,6 +291,9 @@ def gen_pools_ondemand(
284291
self.verbose(
285292
"warning",
286293
"Argument --skip-cross-node-cores cannot take effect on the designated cores. Disabled.",
294+
+"please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide" # noqa: B950
295+
+ "for usage guide",
296+
warning_type=WarningType.WrongArgument,
287297
)
288298
break
289299
else:
@@ -302,7 +312,7 @@ def gen_pools_ondemand(
302312
e_cores = [c.cpu for c in pool if not c.is_p_core]
303313
if len(e_cores) > 0:
304314
self.verbose(
305-
"warning",
315+
"info",
306316
f"Efficient-Cores are detected ({e_cores}). Disabled for performance consideration. \
307317
You can enable them with argument --use-e-cores.",
308318
)
@@ -348,8 +358,11 @@ def gen_pools_ondemand(
348358
if skip_cross_node_cores:
349359
self.verbose(
350360
"warning",
351-
"Argument --skip-cross-node-cores won't take effect when both --ninstances and \
352-
--ncores-per-instance are explicitly set.",
361+
"Argument --skip-cross-node-cores won't take effect when both --ninstances and"
362+
+ " --ncores-per-instance are explicitly set."
363+
+ "please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide" # noqa: B950
364+
+ "for usage guide",
365+
warning_type=WarningType.AmbiguousArgument,
353366
)
354367
assert (
355368
ninstances * ncores_per_instance > 0

intel_extension_for_pytorch/cpu/launch/launch.py

Lines changed: 42 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import intel_extension_for_pytorch.cpu.auto_ipex as auto_ipex
99
from .launcher_distributed import DistributedTrainingLauncher
1010
from .launcher_multi_instances import MultiInstancesLauncher
11+
from ...utils._logger import logger, WarningType
1112

1213
"""
1314
This is a script for launching PyTorch training and inference on Intel Xeon CPU with optimal configurations.
@@ -220,53 +221,72 @@ def add_deprecated_params(parser):
220221

221222
def process_deprecated_params(args, logger):
222223
if args.nproc_per_node != -1:
223-
logger.warning("Argument --nproc_per_node is deprecated by --nprocs-per-node.")
224+
logger.warning(
225+
"Argument --nproc_per_node is deprecated by --nprocs-per-node.",
226+
_type=WarningType.DeprecatedArgument,
227+
)
224228
args.nprocs_per_node = args.nproc_per_node
225229
if args.more_mpi_params != "":
226230
logger.warning(
227-
"Argument --more_mpi_params is deprecated by --extra-mpi-params."
231+
"Argument --more_mpi_params is deprecated by --extra-mpi-params.",
232+
_type=WarningType.DeprecatedArgument,
228233
)
229234
args.extra_mpi_params = args.more_mpi_params
230235
if args.ncore_per_instance != -1:
231236
logger.warning(
232-
"Argument --ncore_per_instance is deprecated by --ncores-per-instance."
237+
"Argument --ncore_per_instance is deprecated by --ncores-per-instance.",
238+
_type=WarningType.DeprecatedArgument,
233239
)
234240
args.ncores_per_instance = args.ncore_per_instance
235241
if args.node_id != -1:
236-
logger.warning("Argument --node_id is deprecated by --nodes-list.")
242+
logger.warning(
243+
"Argument --node_id is deprecated by --nodes-list.",
244+
_type=WarningType.DeprecatedArgument,
245+
)
237246
args.nodes_list = str(args.node_id)
238247
if args.core_list != "":
239-
logger.warning("Argument --core_list is deprecated by --cores-list.")
248+
logger.warning(
249+
"Argument --core_list is deprecated by --cores-list.",
250+
_type=WarningType.DeprecatedArgument,
251+
)
240252
args.cores_list = args.core_list
241253
if args.logical_core_for_ccl:
242254
logger.warning(
243-
"Argument --logical_core_for_ccl is deprecated by --logical-cores-for-ccl."
255+
"Argument --logical_core_for_ccl is deprecated by --logical-cores-for-ccl.",
256+
_type=WarningType.DeprecatedArgument,
244257
)
245258
args.logical_cores_for_ccl = args.logical_core_for_ccl
246259
if args.use_logical_core:
247260
logger.warning(
248-
"Argument --use_logical_core is deprecated by --use-logical-cores."
261+
"Argument --use_logical_core is deprecated by --use-logical-cores.",
262+
_type=WarningType.DeprecatedArgument,
249263
)
250264
args.use_logical_cores = args.use_logical_core
251265
if args.log_path != "":
252-
logger.warning("Argument --log_path is deprecated by --log-dir.")
266+
logger.warning(
267+
"Argument --log_path is deprecated by --log-dir.",
268+
_type=WarningType.DeprecatedArgument,
269+
)
253270
args.log_dir = args.log_path
254271

255272
if args.multi_instance:
256273
logger.info(
257-
"Argument --multi_instance is deprecated. Will be removed. \
258-
If you are using the deprecated argument, please update it to the new one."
274+
"Argument --multi_instance is deprecated. Will be removed."
275+
+ "If you are using the deprecated argument, please update it to the new one.",
276+
_type=WarningType.DeprecatedArgument,
259277
)
260278
if args.distributed:
261279
logger.info(
262-
"Argument --distributed is deprecated. Will be removed. \
263-
If you are using the deprecated argument, please update it to the new one."
280+
"Argument --distributed is deprecated. Will be removed."
281+
+ "If you are using the deprecated argument, please update it to the new one.",
282+
_type=WarningType.DeprecatedArgument,
264283
)
265284

266285
if args.enable_tcmalloc or args.enable_jemalloc or args.use_default_allocator:
267286
logger.warning(
268-
"Arguments --enable_tcmalloc, --enable_jemalloc and --use_default_allocator \
269-
are deprecated by --memory-allocator."
287+
"Arguments --enable_tcmalloc, --enable_jemalloc and --use_default_allocator"
288+
+ "are deprecated by --memory-allocator tcmalloc/jemalloc/auto.",
289+
_type=WarningType.DeprecatedArgument,
270290
)
271291
if args.use_default_allocator:
272292
args.memory_allocator = "default"
@@ -276,16 +296,21 @@ def process_deprecated_params(args, logger):
276296
args.memory_allocator = "tcmalloc"
277297
if args.disable_numactl:
278298
logger.warning(
279-
"Argument --disable_numactl is deprecated by --multi-task-manager."
299+
"Argument --disable_numactl is deprecated by --multi-task-manager taskset.",
300+
_type=WarningType.DeprecatedArgument,
280301
)
281302
args.multi_task_manager = "taskset"
282303
if args.disable_taskset:
283304
logger.warning(
284-
"Argument --disable_taskset is deprecated by --multi-task-manager."
305+
"Argument --disable_taskset is deprecated by --multi-task-manager numactl.",
306+
_type=WarningType.DeprecatedArgument,
285307
)
286308
args.multi_task_manager = "numactl"
287309
if args.disable_iomp:
288-
logger.warning("Argument --disable_iomp is deprecated by --omp-runtime.")
310+
logger.warning(
311+
"Argument --disable_iomp is deprecated by --omp-runtime default.",
312+
_type=WarningType.DeprecatedArgument,
313+
)
289314
args.omp_runtime = "default"
290315

291316

@@ -383,10 +408,6 @@ def run_main_with_args(args):
383408
if platform.system() == "Windows":
384409
raise RuntimeError("Windows platform is not supported!!!")
385410

386-
format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
387-
logging.basicConfig(level=logging.INFO, format=format_str)
388-
logger = logging.getLogger(__name__)
389-
390411
launcher_distributed = DistributedTrainingLauncher(logger)
391412
launcher_multi_instances = MultiInstancesLauncher(logger)
392413

0 commit comments

Comments (0)