Skip to content

Commit d36e153

Browse files
authored
Merge pull request #1 from huggingface/toilaluan-feat-taylorseer
Some nit changes
2 parents 3bf9f9d + 9a91821 commit d36e153

File tree

2 files changed

+28
-7
lines changed

2 files changed

+28
-7
lines changed

src/diffusers/hooks/taylorseer_cache.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -69,13 +69,15 @@ class TaylorSeerCacheConfig:
         - Patterns are matched using `re.fullmatch` on the module name.
         - If `skip_predict_identifiers` or `cache_identifiers` are provided, only matching modules are hooked.
         - If neither is provided, all attention-like modules are hooked by default.
-    Example of inactive and active usage:
-    ```
-    def forward(x):
-        x = self.module1(x)  # inactive module: returns zeros tensor based on shape recorded during full compute
-        x = self.module2(x)  # active module: caches output here, avoiding recomputation of prior steps
-        return x
-    ```
+
+    Example of inactive and active usage:
+
+    ```py
+    def forward(x):
+        x = self.module1(x)  # inactive module: returns zeros tensor based on shape recorded during full compute
+        x = self.module2(x)  # active module: caches output here, avoiding recomputation of prior steps
+        return x
+    ```
     """
 
     cache_interval: int = 5

src/diffusers/utils/dummy_pt_objects.py

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,21 @@ def from_pretrained(cls, *args, **kwargs):
         requires_backends(cls, ["torch"])
 
 
+class TaylorSeerCacheConfig(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
 def apply_faster_cache(*args, **kwargs):
     requires_backends(apply_faster_cache, ["torch"])
 
@@ -273,6 +288,10 @@ def apply_pyramid_attention_broadcast(*args, **kwargs):
     requires_backends(apply_pyramid_attention_broadcast, ["torch"])
 
 
+def apply_taylorseer_cache(*args, **kwargs):
+    requires_backends(apply_taylorseer_cache, ["torch"])
+
+
 class AllegroTransformer3DModel(metaclass=DummyObject):
     _backends = ["torch"]

0 commit comments

Comments
 (0)