
Commit 57084da

Remove unnecessary lines (#8569)

* Remove unused line

Co-authored-by: Sayak Paul <[email protected]>

1 parent 70611a1 · commit 57084da

11 files changed: +3 -21 lines


examples/community/regional_prompting_stable_diffusion.py

-2

@@ -467,8 +467,6 @@ def make_emblist(self, prompts):
 
 
 def split_dims(xs, height, width):
-    xs = xs
-
     def repeat_div(x, y):
         while y > 0:
             x = math.ceil(x / 2)
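
For reference, `repeat_div` computes how many elements remain after halving a dimension a given number of times (with ceiling rounding), which is how this community pipeline maps the latent height/width onto deeper attention resolutions. A minimal standalone sketch; the decrement and `return` are assumptions, since the hunk only shows the loop head:

```python
import math


def repeat_div(x: int, y: int) -> int:
    # Halve x with ceiling rounding, y times; e.g. repeat_div(64, 3) == 8.
    # Only the `while`/`math.ceil` lines appear in the diff context; the
    # decrement and return are assumed here to make the sketch runnable.
    while y > 0:
        x = math.ceil(x / 2)
        y -= 1
    return x


print(repeat_div(64, 3))  # 8
print(repeat_div(77, 1))  # 39
```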

src/diffusers/models/attention_processor.py

+1 -3

@@ -1112,9 +1112,7 @@ def __call__(
         key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
         value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
 
-        hidden_states = hidden_states = F.scaled_dot_product_attention(
-            query, key, value, dropout_p=0.0, is_causal=False
-        )
+        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
         hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
         hidden_states = hidden_states.to(query.dtype)
 
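
The fix collapses a duplicated assignment (`hidden_states = hidden_states = ...`) into a single call to PyTorch's fused attention kernel. For context, here is a small sketch of the `F.scaled_dot_product_attention` call pattern used in this processor, with illustrative tensor sizes (the real code derives them from the attention module):

```python
import torch
import torch.nn.functional as F

batch_size, heads, seq_len, head_dim = 2, 8, 77, 64

# q/k/v laid out as (batch, heads, seq_len, head_dim), matching the
# .view(...).transpose(1, 2) reshapes in the surrounding context lines.
query = torch.randn(batch_size, heads, seq_len, head_dim)
key = torch.randn(batch_size, heads, seq_len, head_dim)
value = torch.randn(batch_size, heads, seq_len, head_dim)

# Fused attention; dropout_p=0.0 and is_causal=False mirror the call in the diff.
hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)

# Fold heads back into the channel dimension, as the following context line does.
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, heads * head_dim)
print(hidden_states.shape)  # torch.Size([2, 77, 512])
```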

src/diffusers/models/controlnet_sd3.py

-2

@@ -308,8 +308,6 @@ def forward(
                 "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
             )
 
-        height, width = hidden_states.shape[-2:]
-
         hidden_states = self.pos_embed(hidden_states)  # takes care of adding positional embeddings too.
         temb = self.time_text_embed(timestep, pooled_projections)
         encoder_hidden_states = self.context_embedder(encoder_hidden_states)

src/diffusers/models/unets/unet_stable_cascade.py

+1 -3

@@ -478,9 +478,7 @@ def custom_forward(*inputs):
                         create_custom_forward(block), x, r_embed, use_reentrant=False
                     )
                 else:
-                    x = x = torch.utils.checkpoint.checkpoint(
-                        create_custom_forward(block), use_reentrant=False
-                    )
+                    x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), use_reentrant=False)
                 if i < len(repmap):
                     x = repmap[i](x)
             level_outputs.insert(0, x)
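
Here the duplicated `x = x = ...` is collapsed into a single gradient-checkpointed call. As a refresher, the `create_custom_forward` plus `torch.utils.checkpoint.checkpoint(..., use_reentrant=False)` pattern looks roughly like this; a generic sketch with a dummy block, not the Stable Cascade module itself:

```python
import torch
import torch.nn as nn


def create_custom_forward(module):
    # Wrap the module in a plain callable, mirroring the helper named in the
    # hunk header (`def custom_forward(*inputs):`).
    def custom_forward(*inputs):
        return module(*inputs)

    return custom_forward


block = nn.Linear(16, 16)  # stand-in for a U-Net block
x = torch.randn(4, 16, requires_grad=True)

# Recompute the block's activations during backward instead of storing them;
# use_reentrant=False selects the non-reentrant checkpoint implementation.
x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
x.sum().backward()
```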

src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py

-1

@@ -661,7 +661,6 @@ def __call__(
                             noise_guidance_edit_tmp = torch.einsum(
                                 "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp
                             )
-                            noise_guidance_edit_tmp = noise_guidance_edit_tmp
                             noise_guidance = noise_guidance + noise_guidance_edit_tmp
 
                             self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu()
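
The deleted line was a no-op self-assignment right after the einsum that folds per-concept guidance into a single per-sample tensor. A small sketch of what that `torch.einsum("cb,cbijk->bijk", ...)` contraction does, with illustrative shapes:

```python
import torch

concepts, batch, channels, height, width = 3, 2, 4, 8, 8

# Per-concept weights (concepts x batch) and per-concept guidance maps
# (concepts x batch x channels x height x width); shapes are illustrative.
concept_weights_tmp = torch.softmax(torch.randn(concepts, batch), dim=0)
noise_guidance_edit_tmp = torch.randn(concepts, batch, channels, height, width)

# "cb,cbijk->bijk": weight each concept's guidance map and sum over the
# concept axis, leaving one guidance tensor per batch element.
combined = torch.einsum("cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp)
print(combined.shape)  # torch.Size([2, 4, 8, 8])
```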

tests/lora/test_lora_layers_sd3.py

-1

@@ -153,7 +153,6 @@ def test_simple_inference_with_transformer_lora_save_load(self):
         pipe = self.pipeline_class(**components)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
-        inputs = self.get_dummy_inputs(torch_device)
 
         pipe.transformer.add_adapter(transformer_config)
         self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")

tests/models/transformers/test_models_prior.py

-3

@@ -144,9 +144,6 @@ def test_output_pretrained(self):
 class PriorTransformerIntegrationTests(unittest.TestCase):
     def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
         torch.manual_seed(seed)
-        batch_size = batch_size
-        embedding_dim = embedding_dim
-        num_embeddings = num_embeddings
 
         hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
 
tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py

+1 -1

@@ -142,7 +142,7 @@ def get_dummy_inputs(self, device, seed=0):
             generator = torch.manual_seed(seed)
         else:
             generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = inputs = {
+        inputs = {
             "prompt": "a cat and a frog",
             "token_indices": [2, 5],
             "generator": generator,

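The duplicated `inputs = inputs = {` becomes a single assignment. The surrounding helper builds a seeded generator per device; the branching it uses looks roughly like this (a sketch; the exact device check in the test helper may differ):

```python
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # Some backends (historically "mps") lacked per-device Generator support,
    # so the helper falls back to the global torch.manual_seed there.
    if device == "mps":
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


generator = make_generator("cpu", seed=0)
inputs = {
    "prompt": "a cat and a frog",
    "token_indices": [2, 5],
    "generator": generator,
}
```
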
tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py

-1

@@ -538,7 +538,6 @@ def test_inference_batch_single_identical(
 
         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string
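
The removed `batch_size = batch_size` sat at the top of the loop that batchifies the dummy inputs. The shape of that loop, condensed into a runnable sketch (the real helper handles more parameter types):

```python
batch_size = 2
inputs = {"prompt": "a cat and a frog", "num_inference_steps": 2}
batch_params = {"prompt"}  # stands in for self.batch_params

# batchify inputs
batched_inputs = {}
for name, value in inputs.items():
    if name in batch_params:
        # prompt is a string, so repeat it batch_size times
        batched_inputs[name] = [value] * batch_size
    else:
        batched_inputs[name] = value

print(batched_inputs["prompt"])  # ['a cat and a frog', 'a cat and a frog']
```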

tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py

-1

@@ -574,7 +574,6 @@ def test_inference_batch_single_identical(
 
         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string

tests/schedulers/test_scheduler_edm_euler.py

-3

@@ -89,9 +89,6 @@ def test_from_save_pretrained(self):
             scheduler_config = self.get_scheduler_config()
             scheduler = scheduler_class(**scheduler_config)
 
-            sample = self.dummy_sample
-            residual = 0.1 * sample
-
             with tempfile.TemporaryDirectory() as tmpdirname:
                 scheduler.save_config(tmpdirname)
                 new_scheduler = scheduler_class.from_pretrained(tmpdirname)
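
`sample` and `residual` were computed but never used by this save/load test, which only exercises the config round trip. That round trip looks roughly like this (assuming `diffusers` is installed; the test builds the scheduler from `get_scheduler_config()` rather than defaults):

```python
import tempfile

from diffusers import EDMEulerScheduler

scheduler = EDMEulerScheduler()  # defaults stand in for get_scheduler_config()

with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)  # writes scheduler_config.json
    new_scheduler = EDMEulerScheduler.from_pretrained(tmpdirname)

# The configuration should survive the round trip unchanged.
assert new_scheduler.config.num_train_timesteps == scheduler.config.num_train_timesteps
```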
