
Commit 4956bf7

bsq seems to work well
1 parent 2681312 commit 4956bf7

3 files changed (+9, -6 lines)

examples/autoencoder_lfq.py

Lines changed: 4 additions & 1 deletion

@@ -18,6 +18,8 @@
 codebook_size = 2 ** 8
 entropy_loss_weight = 0.02
 diversity_gamma = 1.
+spherical = True
+
 device = "cuda" if torch.cuda.is_available() else "cpu"

 class LFQAutoEncoder(nn.Module):
@@ -107,7 +109,8 @@ def iterate_dataset(data_loader):
 model = LFQAutoEncoder(
     codebook_size = codebook_size,
     entropy_loss_weight = entropy_loss_weight,
-    diversity_gamma = diversity_gamma
+    diversity_gamma = diversity_gamma,
+    spherical = spherical
 ).to(device)

 opt = torch.optim.AdamW(model.parameters(), lr=lr)
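
For context, a minimal sketch of passing the new flag to the quantizer directly rather than through the example autoencoder. It assumes LFQ accepts a `spherical` keyword exactly as the diff above suggests (presumably the "bsq" variant the commit message refers to); shapes and the return signature follow the library's usual LFQ example.

import torch
from vector_quantize_pytorch import LFQ

quantizer = LFQ(
    dim = 8,                        # log2(codebook_size) latent channels
    codebook_size = 2 ** 8,
    entropy_loss_weight = 0.02,
    diversity_gamma = 1.,
    spherical = True                # assumed flag: l2-normalize the quantized codes
)

image_feats = torch.randn(1, 8, 32, 32)      # (batch, dim, height, width)
quantized, indices, entropy_aux_loss = quantizer(image_feats)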

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.14.37"
+version = "1.14.38"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang", email = "lucidrains@gmail.com" }

vector_quantize_pytorch/lookup_free_quantization.py

Lines changed: 4 additions & 4 deletions

@@ -291,6 +291,10 @@ def forward(
 codebook_value = torch.ones_like(x) * self.codebook_scale
 quantized = torch.where(x > 0, codebook_value, -codebook_value)

+# calculate indices
+
+indices = reduce((quantized > 0).int() * self.mask.int(), 'b n c d -> b n c', 'sum')
+
 # maybe l2norm

 if self.spherical:
@@ -304,10 +308,6 @@ def forward(
 else:
     x = quantized

-# calculate indices
-
-indices = reduce((x > 0).int() * self.mask.int(), 'b n c d -> b n c', 'sum')
-
 # entropy aux loss

 if self.training:
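
The hunks above move the index computation so it runs on `quantized` (the raw sign assignment) before the optional l2norm branch, instead of on `x` afterwards. A standalone sketch of that bit-to-index reduction follows; the `mask` tensor here is an assumption standing in for `self.mask`, one power of two per codebook dimension.

import torch
from einops import reduce

bits = 4                                     # dimensions per codebook entry
mask = 2 ** torch.arange(bits)               # tensor([1, 2, 4, 8])

# toy pre-normalization quantized values, shape (batch, seq, codebooks, dims)
quantized = torch.randn(2, 3, 1, bits).sign()

# each positive dimension contributes its power-of-two bit; summing over d
# gives one integer code index per (batch, seq, codebook) position
indices = reduce((quantized > 0).int() * mask.int(), 'b n c d -> b n c', 'sum')

print(indices.shape)                         # torch.Size([2, 3, 1])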
