fast-stable-diffusion
Convert six assignment statements to augmented assignments
:eyes: Some source code analysis tools can help find opportunities for improving software components. :thought_balloon: I propose to increase the use of augmented assignment statements accordingly.
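For illustration, here is a minimal sketch (not taken from the repository; the variable name and values are made up) of the transformation this proposal asks for: each statement of the form `x = x <op> y` is rewritten with the corresponding augmented assignment operator.

```python
# Illustrative sketch only; values are not taken from the repository.
filename = "sample-0001"

# Plain assignment: the target name appears on both sides.
filename = filename + "-esrgan"

# Equivalent augmented assignment: the same update, stated once.
filename = "sample-0001"          # reset for the comparison
filename += "-esrgan"

assert filename == "sample-0001-esrgan"
```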
diff --git a/AUTOMATIC1111_files/deforum/depth.py b/AUTOMATIC1111_files/deforum/depth.py
index 6c8cb6d..b46a9fb 100644
--- a/AUTOMATIC1111_files/deforum/depth.py
+++ b/AUTOMATIC1111_files/deforum/depth.py
@@ -128,7 +128,7 @@ class DepthModel():
# MiDaS makes the near values greater, and the far values lesser. Let's reverse that and try to align with AdaBins a bit better.
midas_depth = np.subtract(50.0, midas_depth)
- midas_depth = midas_depth / 19.0
+ midas_depth /= 19.0
# blend between MiDaS and AdaBins predictions
if use_adabins:
diff --git a/Relaxed-mode/Relaxed.py b/Relaxed-mode/Relaxed.py
index 72ded4c..266b6f5 100644
--- a/Relaxed-mode/Relaxed.py
+++ b/Relaxed-mode/Relaxed.py
@@ -406,7 +406,7 @@ def seed_to_int(s):
return random.randint(0, 2**32 - 1)
n = abs(int(s) if s.isdigit() else random.Random(s).randint(0, 2**32 - 1))
while n >= 2**32:
- n = n >> 32
+ n >>= 32
alphas=n
return n
@@ -867,7 +867,7 @@ skip_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoisin
output, img_mode = RealESRGAN.enhance(x_sample[:,:,::-1])
x_sample = output[:,:,::-1]
image = Image.fromarray(x_sample)
- filename = filename + '-esrgan'
+ filename += '-esrgan'
save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
skip_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)
@@ -1147,7 +1147,7 @@ def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask
t_enc_steps = t_enc
obliterate = False
if ddim_steps == t_enc_steps:
- t_enc_steps = t_enc_steps - 1
+ t_enc_steps -= 1
obliterate = True
if sampler_name != 'DDIM':
@@ -1233,7 +1233,7 @@ def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask
init_img = output_images[0]
if not random_seed_loopback:
- seed = seed + 1
+ seed += 1
else:
seed = seed_to_int(None)
denoising_strength = max(denoising_strength * 0.95, 0.1)
diff --git a/precompiled/attention.py b/precompiled/attention.py
index eab3563..29e76e4 100644
--- a/precompiled/attention.py
+++ b/precompiled/attention.py
@@ -143,7 +143,7 @@ class SpatialSelfAttention(nn.Module):
k = rearrange(k, 'b c h w -> b c (h w)')
w_ = torch.einsum('bij,bjk->bik', q, k)
- w_ = w_ * (int(c)**(-0.5))
+ w_ *= (int(c)**(-0.5))
w_ = torch.nn.functional.softmax(w_, dim=2)
# attend to values
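One caveat worth noting for the NumPy and PyTorch lines above: for array and tensor operands, the augmented form is an in-place operation rather than a rebinding of the name, which can matter if the original object is shared elsewhere or tracked by autograd. A small sketch of the difference, using NumPy and made-up values rather than code from the repository:

```python
import numpy as np

a = np.ones(3)
alias = a

# Plain assignment creates a new array and rebinds the name;
# the alias still points at the unchanged original.
a = a * 2.0
print(alias)        # [1. 1. 1.]

b = np.ones(3)
alias = b

# Augmented assignment modifies the existing array in place;
# the alias observes the change.
b *= 2.0
print(alias)        # [2. 2. 2.]
```

For the plain Python integers and strings in the other hunks there is no such difference, so the conversion is purely a readability change there.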
Modifying the repo files will prevent `git pull` from functioning, and a hard reset might remove some user settings.
:thought_balloon: Do you suggest improving the affected source code in any other development repository?
https://github.com/AUTOMATIC1111/stable-diffusion-webui/