# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=missing-docstring, invalid-name
"""This is modified from https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/main/utils_quant.py to work with BitBLAS."""
from logging import getLogger

import torch
from torch import nn

from bitblas import Matmul, MatmulConfig, auto_detect_nvidia_target
from bitblas.cache import global_operator_cache, get_database_path

logger = getLogger(__name__)

BITBLAS_TARGET = auto_detect_nvidia_target()
BITBLAS_DATABASE_PATH = get_database_path()


def weight_quant(weight, num_bits=1):
    dtype = weight.dtype
    weight = weight.float()
    s = 1 / weight.abs().mean().clamp(min=1e-5)
    result = (weight * s).round().clamp(-1, 1) / s
    return result.type(dtype)
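
# A minimal worked example (not part of the original file) of the absmean
# quantization above: each element is snapped to the ternary grid {-1, 0, +1} / s.
#   w = torch.tensor([[0.4, -0.9], [0.05, 1.2]])
#   s = 1 / w.abs().mean()            # = 1 / 0.6375 ~= 1.57
#   (w * s).round().clamp(-1, 1)      # -> [[1., -1.], [0., 1.]]
#   weight_quant(w)                   # -> the grid above divided back by s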


def activation_quant(x, num_bits=8):
    dtype = x.dtype
    x = x.float()
    Qn = -(2**(num_bits - 1))
    Qp = 2**(num_bits - 1) - 1
    s = Qp / x.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
    result = (x * s).round().clamp(Qn, Qp) / s
    return result.type(dtype)
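
# Sketch of the per-token absmax path (illustrative, not from the original):
# with num_bits=8, Qn=-128 and Qp=127; each row is scaled so its largest
# magnitude maps to 127 before rounding, then rescaled back.
#   x = torch.tensor([[0.5, -2.0]])
#   s = 127 / 2.0                         # per-row scale, shape (1, 1)
#   (x * s).round().clamp(-128, 127)      # -> [[32., -127.]]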


class BitLinearBitBLAS(nn.Module):

    def __init__(
        self,
        in_features: int,
        out_features: int,
        weight_bits=1,
        input_bits=8,
        **kwargs,
    ):
        """RMSNorm is placed outside BitLinear."""
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight_bits = weight_bits
        self.input_bits = input_bits

        matmul_config = MatmulConfig(
            N=self.out_features,  # N dimension
            K=self.in_features,  # K dimension
            A_dtype="int8",  # activation A dtype
            W_dtype="int2",  # weight W dtype
            accum_dtype="int32",  # accumulation dtype
            out_dtype="float32",  # output dtype
            layout="nt",  # matrix layout; "nt" means A is not transposed and W is transposed
            with_bias=False,  # bias
            # configs for weight-only quantization
            group_size=None,  # setting for grouped quantization
            with_scaling=False,  # setting for scaling factor
            with_zeros=False,  # setting for zeros
            zeros_mode=None,  # setting for how to calculate zeros
        )
        ENABLE_TUNING = True
        self.bitblas_matmul = self._get_or_create_bitblas_operator(matmul_config, ENABLE_TUNING)

        self.format = "bitnet"
        self.Qp = 2**(self.input_bits - 1) - 1
    def _get_or_create_bitblas_operator(self, config, enable_tuning):
        if global_operator_cache.size() == 0:
            global_operator_cache.load_from_database(BITBLAS_DATABASE_PATH, BITBLAS_TARGET)
            logger.info(f"Loaded {global_operator_cache.size()} operators from database.")

        bitblas_matmul = global_operator_cache.get(config)
        if bitblas_matmul is None:
            # Create with tuning disabled first, since the operator may still be
            # loadable from the database; tune afterwards if requested.
            bitblas_matmul = Matmul(config, target=BITBLAS_TARGET, enable_tuning=False)
            if enable_tuning:
                bitblas_matmul.hardware_aware_finetune(topk=20)
                global_operator_cache.add(config, bitblas_matmul)
                global_operator_cache.save_into_database(BITBLAS_DATABASE_PATH, BITBLAS_TARGET)
                print("BitBLAS tuning done; appended operator to global_operator_cache.")
            else:
                print("BitBLAS operator created.")
        else:
            print("BitBLAS operator found in global_operator_cache.")
        return bitblas_matmul
    def replace_weight_param_with_qweight(self):
        if hasattr(self, "weight"):
            del self.weight
        quant_weight = torch.empty(self.bitblas_matmul.retrieve_weight_shape())
        self.qweight = nn.Parameter(quant_weight, requires_grad=False)
        self.format = "bitblas"
    @classmethod
    def from_bit_linear(cls, bitlinear, weight_group=1):
        bitblas_linear = cls(
            bitlinear.in_features, bitlinear.out_features, weight_bits=1, input_bits=8)
        sw, qweight = bitblas_linear.create_bitblas_weights(bitlinear.weight, weight_group)
        bitblas_linear.register_buffer("qweight", qweight)
        bitblas_linear.register_buffer("sw", sw)
        if bitlinear.bias is not None:
            bitblas_linear.register_buffer("bias", bitlinear.bias)
        else:
            bitblas_linear.bias = None
        return bitblas_linear
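
    # Hedged usage sketch (the shapes and the .cuda() placement are illustrative
    # assumptions, not taken from this file):
    #   fp_layer = BitLinear(1024, 1024, bias=False)
    #   bb_layer = BitLinearBitBLAS.from_bit_linear(fp_layer)
    #   y = bb_layer(torch.randn(1, 1024).cuda())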
    def create_bitblas_weights(self, weight, weight_group=1):
        if weight_group:
            hidden_size = weight.size(0)
            group_size = hidden_size // weight_group
            sw_list = []
            qweight_list = []
            for i in range(weight_group):
                start_idx = i * group_size
                end_idx = (i + 1) * group_size
                sw = 1 / weight[start_idx:end_idx].abs().mean().clamp(min=1e-5)
                sw_list.append(sw.repeat(group_size))
                qweight = self.weight_quant(weight[start_idx:end_idx]).detach()
                qweight_list.append(qweight)
            sw = torch.cat(sw_list, dim=0)
            qweight = torch.cat(qweight_list, dim=0)
        else:
            sw = 1 / weight.abs().mean().clamp(min=1e-5)
            qweight = self.weight_quant(weight).detach()
        qweight = self.bitblas_matmul.transform_weight(qweight)
        qweight = nn.Parameter(qweight, requires_grad=False)
        return sw, qweight
    def post_process_weights(self):
        sw = 1 / self.weight.abs().mean().clamp(min=1e-5)
        self.sw = sw
        quant_weight = self.weight_quant(self.weight).detach()
        quant_weight = self.bitblas_matmul.transform_weight(quant_weight)
        # Remove self.weight and replace it with quant_weight.
        if hasattr(self, "weight"):
            del self.weight
        self.qweight = nn.Parameter(quant_weight, requires_grad=False)
        self.format = "bitblas"
    @staticmethod
    def weight_quant(weight):
        weight = weight.float()
        s = 1 / weight.abs().mean().clamp(min=1e-5)
        result = (weight * s).round().clamp(-1, 1)
        return result.type(torch.int8)

    @torch.compile
    def activation_quant(self, x, num_bits=8):
        x = x.float()
        Qn = -(2**(num_bits - 1))
        Qp = 2**(num_bits - 1) - 1
        s = Qp / x.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
        result = (x * s).round().clamp(Qn, Qp)
        return result.type(torch.int8), s
    @torch.compile
    def post_quant_process(self, input, si, sw):
        out = input / si
        out = out / sw
        out = out.half()
        return out
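
    # Why sequential division works (informal note, not from the original):
    # quant_input ~= input * si and qweight holds weight * sw rounded to
    # {-1, 0, 1}, so the accumulator is roughly (input @ weight.T) * si * sw;
    # dividing by si and then by sw undoes both scales while avoiding the
    # inf that a single (si * sw) division can produce (see forward below).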
    # For correctness evaluation only.
    def native_forward(self, input):
        quant_input = input + (activation_quant(input, self.input_bits) - input).detach()
        quant_weight = (
            self.weight + (weight_quant(self.weight, self.weight_bits) - self.weight).detach())
        out = nn.functional.linear(quant_input, quant_weight)
        if self.bias is not None:
            out += self.bias.view(1, -1).expand_as(out)
        return out
    def forward_fp32_simulated(self, input):
        # activation_quant returns a (tensor, scale) tuple, so unpack before
        # detaching; calling .detach() on the tuple itself would raise.
        quant_input, si = self.activation_quant(input, self.input_bits)
        quant_input = quant_input.detach()
        quant_weight = self.weight_quant(self.weight).detach()
        fp32_simulated_input = quant_input.float()
        fp32_simulated_weight = quant_weight.float()
        fp32_simulated_out = nn.functional.linear(fp32_simulated_input, fp32_simulated_weight)
        sw = 1 / self.weight.abs().mean().clamp(min=1e-5)
        # Dividing by (si * sw) in a single step can overflow to inf in some
        # cases, so divide by si and sw sequentially.
        out = fp32_simulated_out / si
        out = out / sw
        out = out.half()
        if self.bias is not None:
            out += self.bias.view(1, -1).expand_as(out)
        return out
    def forward(self, input):
        # return self.forward_fp32_simulated(input)
        quant_input, si = self.activation_quant(input, self.input_bits)
        fp32_out = self.bitblas_matmul(quant_input, self.qweight)
        sw = self.sw
        # Dividing by (si * sw) in a single step can produce inf in some cases,
        # so post_quant_process divides by si and sw sequentially.
        out = self.post_quant_process(fp32_out, si, sw)
        if self.bias is not None:
            out += self.bias.view(1, -1).expand_as(out)
        return out


# Naive BitLinear from HuggingFace
class BitLinear(nn.Linear):

    def __init__(self, *kargs, weight_bits=1, input_bits=8, **kwargs):
        """RMSNorm is placed outside BitLinear."""
        super(BitLinear, self).__init__(*kargs, **kwargs)
        self.weight_bits = weight_bits
        self.input_bits = input_bits

    def forward(self, input):
        quant_input = input + (activation_quant(input, self.input_bits) - input).detach()
        quant_weight = self.weight + (weight_quant(self.weight, self.weight_bits) -
                                      self.weight).detach()
        out = nn.functional.linear(quant_input, quant_weight)
        if self.bias is not None:
            out += self.bias.view(1, -1).expand_as(out)
        return out
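

# A hedged smoke test, not part of the original file. It exercises only the
# CPU-friendly naive BitLinear; the BitBLAS path needs a CUDA GPU and a tuned
# operator, so it is left out here.
if __name__ == "__main__":
    torch.manual_seed(0)
    layer = BitLinear(64, 64, bias=False, weight_bits=1, input_bits=8)
    x = torch.randn(2, 64)
    y = layer(x)
    print("BitLinear output shape:", tuple(y.shape))  # -> (2, 64)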