1 parent 99bcd76 commit 1bb36fe
backends/vulkan/_passes/int4_weight_only_quantizer.py
@@ -7,7 +7,7 @@
 import torch
 import torch.nn.functional as F

-from torchao.quantization.GPTQ import _check_linear_int4_k
+from torchao.quantization.GPTQ.GPTQ import _check_linear_int4_k
 from torchao.quantization.unified import Quantizer
 from torchao.quantization.utils import groupwise_affine_quantize_tensor
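For context, the change only retargets the import to the nested GPTQ.GPTQ module path. A minimal sketch of how a caller could stay compatible with both torchao layouts is shown below; the idea of guarding the import with a fallback is an illustration on my part, not something this commit does, and which torchao versions use which layout is an assumption.

    # Sketch only: prefer the nested module path, fall back to the older
    # flat path if it is not present. Both paths are taken verbatim from
    # the diff above; the version split itself is assumed.
    try:
        from torchao.quantization.GPTQ.GPTQ import _check_linear_int4_k
    except ImportError:
        from torchao.quantization.GPTQ import _check_linear_int4_k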