Comfy from script: LORA effect decreases after multiple images
Your question
I have a script that runs Comfy without the UI. To save some loading time, I put a for loop around the image generation so it produces multiple images with only the seed changing. But the effect of a loaded LoRA decreases after 3 or 4 images in the loop. Does anyone know why? Thanks for your help.
I also modified this function in model_management.py, replacing "cpu" with "cuda" (I use LOW VRAM mode):

```python
def unet_offload_device():
    if vram_state == VRAMState.HIGH_VRAM:
        return get_torch_device()
    else:
        return torch.device("cuda")  # originally torch.device("cpu")
```

The code is:
```python
import random
import torch
import numpy as np
from PIL import Image

import nodes
from nodes import NODE_CLASS_MAPPINGS
from comfyui_extras import nodes_custom_sampler
from comfyui_extras import nodes_flux
from comfyui import model_management
from comfyui_extras import nodes_post_processing
from IPython.display import display
import ipywidgets as widgets

CheckpointLoaderSimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
LoraLoader = NODE_CLASS_MAPPINGS["LoraLoader"]()
FluxGuidance = nodes_flux.NODE_CLASS_MAPPINGS["FluxGuidance"]()
RandomNoise = nodes_custom_sampler.NODE_CLASS_MAPPINGS["RandomNoise"]()
BasicGuider = nodes_custom_sampler.NODE_CLASS_MAPPINGS["BasicGuider"]()
KSamplerSelect = nodes_custom_sampler.NODE_CLASS_MAPPINGS["KSamplerSelect"]()
BasicScheduler = nodes_custom_sampler.NODE_CLASS_MAPPINGS["BasicScheduler"]()
SamplerCustomAdvanced = nodes_custom_sampler.NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
VAELoader = NODE_CLASS_MAPPINGS["VAELoader"]()
VAEDecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
VAEEncode = NODE_CLASS_MAPPINGS["VAEEncode"]()
VAEEncodeTiled = NODE_CLASS_MAPPINGS["VAEEncodeTiled"]()
VAEDecodeTiled = NODE_CLASS_MAPPINGS["VAEDecodeTiled"]()
EmptyLatentImage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
DualCLIPLoader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
UNETLoader = NODE_CLASS_MAPPINGS["UNETLoader"]()
ImageScaleToTotalPixels = nodes_post_processing.NODE_CLASS_MAPPINGS["ImageScaleToTotalPixels"]()
KSampler = NODE_CLASS_MAPPINGS["KSampler"]()
CLIPTextEncodeFlux = nodes_flux.NODE_CLASS_MAPPINGS["CLIPTextEncodeFlux"]()

# Load models
with torch.inference_mode():
    clip = DualCLIPLoader.load_clip("t5xxl_fp8_e4m3fn.safetensors", "clip_l.safetensors", "flux")[0]
    unet = UNETLoader.load_unet("flux1-dev-fp8.safetensors", "fp8_e4m3fn")[0]
    vae = VAELoader.load_vae("ae.sft")[0]

# Round n to the nearest multiple of m
def closestNumber(n, m):
    q = int(n / m)
    n1 = m * q
    if (n * m) > 0:
        n2 = m * (q + 1)
    else:
        n2 = m * (q - 1)
    if abs(n - n1) < abs(n - n2):
        return n1
    return n2
```
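A quick note on the helper, since it is my own function and not a ComfyUI node: closestNumber(n, m) just rounds n to the nearest multiple of m, for example:

```python
closestNumber(500, 16)  # -> 496
closestNumber(768, 16)  # -> 768 (already a multiple of 16)
```

The generation part of the script: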
```python
image_numb = 0

# Generate images
with torch.inference_mode():
    positive_prompt = "Professional photograph of robot made of rusty metal "
    negative_prompt = ""
    width = 512
    height = 768
    seed = 0
    if seed == 0:
        seed = random.randint(0, 18446744073709551615)
    steps = 20
    guidance = 1.5
    lora_strength_model = 0.9
    lora_strength_clip = 0.75
    sampler_name = "euler"
    scheduler = "simple"

    # Load first LORA
    unet_lora, clip_lora = LoraLoader.load_lora(unet, clip, "flux_realism_lora.safetensors", lora_strength_model, lora_strength_clip)

    latent_image = EmptyLatentImage.generate(closestNumber(width, 16), closestNumber(height, 16))[0]
    cond_pos = CLIPTextEncodeFlux.encode(clip_lora, positive_prompt, positive_prompt, guidance)[0]
    cond_neg = CLIPTextEncodeFlux.encode(clip, negative_prompt, negative_prompt, guidance)[0]

    for i in range(10):
        print(f"Current seed is {seed}")
        print(f"Current image is flux_{image_numb}.png")
        sample = KSampler.sample(unet_lora, seed, steps, 1, sampler_name, scheduler, cond_pos, cond_neg, latent_image, 1)[0]
        decoded = VAEDecodeTiled.decode(vae, sample, 512)[0].detach()
        Image.fromarray(np.array(decoded * 255, dtype=np.uint8)[0]).save(f"/content/flux_{toDay}_{image_numb}.png")
        seed += 1
        image_numb += 1
```
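In case it helps with the diagnosis: the workaround I could try is to re-apply the LoRA to the base unet/clip (and re-encode the positive prompt) inside the loop instead of once before it. This is only an untested sketch that reuses the variables and node calls from the script above, not something I have confirmed changes the behaviour:

```python
# Untested variant: re-apply the LoRA and re-encode the prompt on every iteration,
# in case the patched model is being unloaded/reset between runs.
with torch.inference_mode():
    for i in range(10):
        unet_lora, clip_lora = LoraLoader.load_lora(unet, clip, "flux_realism_lora.safetensors",
                                                    lora_strength_model, lora_strength_clip)
        cond_pos = CLIPTextEncodeFlux.encode(clip_lora, positive_prompt, positive_prompt, guidance)[0]
        sample = KSampler.sample(unet_lora, seed, steps, 1, sampler_name, scheduler,
                                 cond_pos, cond_neg, latent_image, 1)[0]
        decoded = VAEDecodeTiled.decode(vae, sample, 512)[0].detach()
        Image.fromarray(np.array(decoded * 255, dtype=np.uint8)[0]).save(f"/content/flux_{toDay}_{image_numb}.png")
        seed += 1
        image_numb += 1
```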
Logs
No response
Other
No response