
Commit 60a494a

committed
loosen restriction around needing a separate optimizer for learnable codebook
1 parent f867b33 commit 60a494a

File tree

2 files changed (+1, -3 lines)

setup.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 setup(
   name = 'vector_quantize_pytorch',
   packages = find_packages(),
-  version = '1.6.22',
+  version = '1.6.23',
   license='MIT',
   description = 'Vector Quantization - Pytorch',
   long_description_content_type = 'text/markdown',

vector_quantize_pytorch/vector_quantize_pytorch.py

Lines changed: 0 additions & 2 deletions
@@ -689,8 +689,6 @@ def __init__(
         self.orthogonal_reg_max_codes = orthogonal_reg_max_codes

         assert not (ema_update and learnable_codebook), 'learnable codebook not compatible with EMA update'
-        assert not learnable_codebook or (learnable_codebook and in_place_codebook_optimizer is not None), \
-            'Must specify an optimizer for the codebook embedding if learnable_codebook is set to True'

         assert 0 <= sync_update_v <= 1.
         assert not (sync_update_v > 0. and not learnable_codebook), 'learnable codebook must be turned on'
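With the second assertion removed, a learnable codebook no longer has to be paired with an in_place_codebook_optimizer: the codebook embeddings are ordinary parameters, so whatever optimizer covers the rest of the model can update them. A minimal sketch of that usage, assuming the constructor flags named in the diff (learnable_codebook, ema_update) and the (quantized, indices, commit_loss) forward signature from the library's README; the dimensions and the toy training step are illustrative only:

import torch
from vector_quantize_pytorch import VectorQuantize

# hedged sketch -- hyperparameters and the training step are illustrative assumptions
vq = VectorQuantize(
    dim = 256,
    codebook_size = 512,
    learnable_codebook = True,   # codebook embeddings are gradient-updated parameters
    ema_update = False           # EMA updates are asserted incompatible with a learnable codebook
    # no in_place_codebook_optimizer required after this commit
)

# the learnable codebook shows up in vq.parameters(), so one optimizer suffices
optimizer = torch.optim.Adam(vq.parameters(), lr = 3e-4)

x = torch.randn(1, 1024, 256)            # (batch, sequence, dim)
quantized, indices, commit_loss = vq(x)

loss = commit_loss.sum()                 # in a real model, add the task loss here
loss.backward()
optimizer.step()
optimizer.zero_grad()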
