Skip to content

Commit 7d37b50

Browse files
committed
Fix typos
1 parent e089313 commit 7d37b50

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

conversion/adaptivegptq.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ class AdaptiveGPTQ:
9090
perm_cpu: torch.tensor = None
9191
invperm: torch.tensor = None
9292

93-
g_idx: torch.tensor = None
93+
# g_idx: torch.tensor = None
9494
scale: torch.tensor = None
9595
qscale: torch.tensor = None
9696
qscale_max: torch.tensor = None
@@ -125,7 +125,7 @@ def drop_buffers(self):
125125
self.perm = None
126126
self.perm_cpu = None
127127
self.invperm = None
128-
self.g_idx = None
128+
# self.g_idx = None
129129
self.scale = None
130130
self.qscale = None
131131
self.qscale_max = None
@@ -389,10 +389,10 @@ def quantize(self, keep_qweight = False, apply = False, drop = False):
389389
# Create g_idx to store inverse activation order
390390

391391
# self.g_idx = torch.tensor(group_idx_list, dtype = torch.int32, device = self.device)
392-
self.g_idx = torch.tensor(group_idx_list, dtype = torch.int32)
392+
# self.g_idx = torch.tensor(group_idx_list, dtype = torch.int32)
393393

394-
self.invperm = torch.argsort(self.perm_cpu)
395-
self.g_idx = self.g_idx[self.invperm]
394+
self.invperm = torch.argsort(self.perm)
395+
# self.g_idx = self.g_idx[self.invperm]
396396

397397
# Store scales
398398

@@ -449,7 +449,7 @@ def apply_temp(self):
449449

450450
def pack(self, key, qparams):
451451

452-
self.qgroups = self.qgroups.to("cude:0")
452+
self.qgroups = self.qgroups.to("cuda:0")
453453
# self.qscale_max = self.qscale_max.to("cude:0")
454454

455455
assert qparams.scale_bits in [4]

0 commit comments

Comments (0)