control-a-video
Question about training CAV
Hi, thank you for sharing this great work!
I tried to reimplement the training logic of CAV, but I ran into some trouble. Could you take a look at what might be the problem?
train.py:
import argparse
import datetime
import logging
import inspect
import math
import os
from typing import Dict, Optional, Tuple
from omegaconf import OmegaConf
from tqdm.auto import tqdm
import time
import itertools
import imageio
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange, repeat
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DDIMScheduler
from diffusers.optimization import get_scheduler
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from transformers import DPTForDepthEstimation
from model.video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel
from model.video_diffusion.models.controlnet3d import ControlNet3DModel
from model.video_diffusion.pipelines.pipeline_stable_diffusion_controlnet3d import Controlnet3DStableDiffusionPipeline
from model.video_diffusion.dataloader.dataset import ControlAVideoDataset
import utils
from vis_utils import image_utils as iutils
logger = get_logger(__name__, log_level="INFO")
def find_trainable_params(name:str, trainable_modules:tuple):
_bool = False
for tm in trainable_modules:
if tm in name:
_bool = True
break
return _bool
def get_statedict_for_sanity_check(model, checklist):
param_name_to_meanvalue = {}
# i = 0
for name, p in model.named_parameters():
if name in checklist:
param_name_to_meanvalue[name] = p.mean().item()
else:
continue
return param_name_to_meanvalue
def main(config):
print(f"config : {config}")
accelerator = Accelerator(
gradient_accumulation_steps=config.optimizer.gradient_accumulation_steps,
mixed_precision=config.mixed_precision,
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if config.seed is not None:
set_seed(config.seed)
# Handle the output folder creation
if accelerator.is_main_process:
now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
config.output_dir = os.path.join(config.output_dir, f"{now}")
os.makedirs(config.output_dir, exist_ok=True)
os.makedirs(f"{config.output_dir}/samples", exist_ok=True)
os.makedirs(f"{config.output_dir}/inv_latents", exist_ok=True)
OmegaConf.save(config, os.path.join(config.output_dir, 'config.yaml'))
### === Load scheduler, tokenizer and models. ============================================================ ###
pretrained_model_path = config.pretrained.pretrained_model_path
control_mode = config.pretrained.control_mode
controlnet_model_path = config.pretrained.controlnet_model_path
noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler")
tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
unet = UNetPseudo3DConditionModel.from_2d_model(pretrained_model_path, subfolder="unet")
controlnet = ControlNet3DModel.from_2d_model(controlnet_model_path)
if control_mode == 'depth':
annotator_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")
elif control_mode == 'canny':
annotator_model = None
elif control_mode == 'hed':
# first download from https://huggingface.co/wf-genius/controlavideo-hed/resolve/main/hed-network.pth
# NOTE: HEDNetwork is referenced here but never imported at the top of this file; the import from the corresponding module in the repo is still missing.
annotator_model = HEDNetwork('./pretrained_checkpoints/hed-network.pth').to("cuda")
# print(f"unet : {unet}")
### ====================================================================================================== ###
### === Freeze vae and text_encoder
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
if annotator_model is not None: annotator_model.requires_grad_(False)  # annotator_model is None when control_mode == 'canny'
unet.requires_grad_(False)
trainable_modules = tuple(config.trainable_modules)
for name, module in unet.named_modules():
# print(f"name : {name}")
# if name.endswith(tuple(config.trainable_modules)):
if find_trainable_params(name, trainable_modules):
# print(f"---> selected name : {name}")
for params in module.parameters():
params.requires_grad = True
controlnet.requires_grad_(False)
trainable_modules = tuple(config.trainable_modules)
for name, module in controlnet.named_modules():
# print(f"controlnet name : {name}")
if find_trainable_params(name, trainable_modules):
# print(f"---> selected name : {name}")
for params in module.parameters():
params.requires_grad = True
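# At this point only parameters inside modules whose names contain one of
# config.trainable_modules (just "_temporal" in the config pasted below) should have
# requires_grad=True; everything else in the UNet and ControlNet stays frozen.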
### ====================================================================================================== ###
# for name, p in controlnet.named_parameters():
# print(f"222 name : {name}")
# if find_trainable_params(name, trainable_modules):
# print(f"---> 222 selected name : {name}")
weight_sanity_checklist = [
"down_blocks.2.resnets.1.conv2.weight",
"down_blocks.2.resnets.1.conv2.conv_temporal.weight",
"controlnet_cond_embedding.conv_in.weight",
"controlnet_cond_embedding.conv_in.conv_temporal.weight",
]
### ====================================================================================================== ###
if config.enable_xformers_memory_efficient_attention:
if is_xformers_available():
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
if config.gradient_checkpointing:
unet.enable_gradient_checkpointing()
### === Set optimizer ===
config_optimizer = config.optimizer
if config_optimizer.scale_lr:
learning_rate = (
config_optimizer.learning_rate * config_optimizer.gradient_accumulation_steps * config.train_batch_size * accelerator.num_processes
)
else:
learning_rate = config_optimizer.learning_rate
# Initialize the optimizer
if config_optimizer.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
)
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
optimizer = optimizer_cls(
itertools.chain(unet.parameters(), controlnet.parameters()),
lr=learning_rate,
betas=(config_optimizer.adam_beta1, config_optimizer.adam_beta2),
weight_decay=config_optimizer.adam_weight_decay,
eps=config_optimizer.adam_epsilon,
)
# optimizer = optimizer_cls(
# unet.parameters(),
# lr=learning_rate,
# betas=(config_optimizer.adam_beta1, config_optimizer.adam_beta2),
# weight_decay=config_optimizer.adam_weight_decay,
# eps=config_optimizer.adam_epsilon,
# )
print(f"optimizer : {optimizer}")
# Scheduler
lr_scheduler = get_scheduler(
config_optimizer.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=config_optimizer.lr_warmup_steps * config_optimizer.gradient_accumulation_steps,
num_training_steps=config.max_train_steps * config_optimizer.gradient_accumulation_steps,
)
print(f"lr_scheduler : {lr_scheduler}")
### ====================================================================================================== ###
### === Get the training dataset pipeline === ###
train_dataset = ControlAVideoDataset(**config.train_data)
# Preprocessing the dataset
train_dataset.prompt_ids = tokenizer(
train_dataset.prompt,
max_length=tokenizer.model_max_length,
padding="max_length", truncation=True, return_tensors="pt"
).input_ids[0]
# print(f"prompt_ids : {train_dataset.prompt_ids}")
# DataLoaders creation:
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=config.train_batch_size
)
### ====================================================================================================== ###
### === Get the validation dataset pipeline === ###
validation_pipeline = Controlnet3DStableDiffusionPipeline(
vae=vae, unet=unet,
text_encoder=text_encoder, tokenizer=tokenizer,
controlnet=controlnet,
scheduler=DDIMScheduler.from_pretrained(pretrained_model_path, subfolder='scheduler'),
annotator_model=annotator_model,
)
### ====================================================================================================== ###
# Prepare everything with our `accelerator`.
unet, controlnet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, controlnet, optimizer, train_dataloader, lr_scheduler
)
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
print(f" weight dtype : {weight_dtype}")
# Move text_encode and vae to gpu and cast to weight_dtype
text_encoder.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
if annotator_model is not None: annotator_model.to(accelerator.device, dtype=weight_dtype)  # skipped when control_mode == 'canny'
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / config.optimizer.gradient_accumulation_steps)
# Afterwards we recalculate our number of training epochs
num_train_epochs = math.ceil(config.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("text2video-controlavideo")
### === Logging for training === ###
total_batch_size = config.train_batch_size * accelerator.num_processes * config.optimizer.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {config.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {config.optimizer.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {config.max_train_steps}")
global_step = 0
first_epoch = 0
w = config.train_data.width
h = config.train_data.height
n_sample_frames = config.train_data.n_sample_frames
### ====================================================================================================== ###
### === Train === ###
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(global_step, config.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
for epoch in range(first_epoch, num_train_epochs):
unet.train()
controlnet.train()
train_loss = 0.0
for step, batch in enumerate(train_dataloader):
# # Skip steps until we reach the resumed step
# if resume_from_checkpoint and epoch == first_epoch and step < resume_step:
# if step % gradient_accumulation_steps == 0:
# progress_bar.update(1)
# continue
# start_time = time.time()
with accelerator.accumulate(unet):
# Convert videos to latent space
pixel_values = batch["pixel_values"].to(weight_dtype) # B x F x 3 x 512 x 512
video_length = pixel_values.shape[1]
pixel_values = rearrange(pixel_values, "b f c h w -> (b f) c h w") # torch.Size([24, 3, 512, 512])
### === Prepare inputs ===
latents = vae.encode(pixel_values).latent_dist.sample()
# print(f"latents : {latents.size()}") # torch.Size([24, 4, 64, 64])
latents = rearrange(latents, "(b f) c h w -> b c f h w", f=video_length) # torch.Size([1, 4, 24, 64, 64])
latents = latents * 0.18215
control_maps = validation_pipeline.get_depth_map(pixel_values, h, w, return_standard_norm=False)# (b f) 1 h w
control_maps = control_maps.to(dtype=controlnet.dtype, device=controlnet.device) # torch.Size([16, 1, 512, 512])
control_maps = F.interpolate(control_maps, size=(h,w), mode='bilinear', align_corners=False) # torch.Size([16, 1, 512, 512])
control_maps = rearrange(control_maps, "(b f) c h w -> b c f h w", f=n_sample_frames)
if control_maps.shape[1] == 1:
control_maps = repeat(control_maps, 'b c f h w -> b (n c) f h w', n=3)
# print(f"control_maps 3: {control_maps.size()}") # torch.Size([1, 3, 16, 512, 512])
if global_step == 0:
tmp_images = batch["pixel_values"]
tmp_control_maps = rearrange(control_maps, "b c f h w-> b f c h w")
tmp = torch.cat([tmp_images, tmp_control_maps], dim=0)
print(f"tmp : {tmp.size()}")
image_dict = [
{"tensors": tmp, "n_in_row": 4, "pp_type": iutils.PP_RGB},
]
dir_path = utils.mkdir_ifnotexist(os.path.join(config.output_dir, "train"))
iutils.save_images_from_dict(
image_dict, dir_path=dir_path, file_name=f"inputs_"+str(global_step).zfill(8),
n_instance=6, is_save=accelerator.is_main_process, return_images=False
)
### ====================================================================================================== ###
# Sample noise that we'll add to the latents
bsz = latents.shape[0]
# Sample a random timestep for each video
timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long() # tensor([905], device='cuda:0')
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
first_latent = latents[:, 0:1, :, :, :]
first_target = torch.zeros_like(first_latent)
rest_latents = latents[:, 1:, :, :, :]
rest_noise = torch.randn_like(rest_latents)
# rest_noise = torch.randn_like(rest_latents) - first_latent.repeat(1, rest_latents.size(1), 1, 1, 1)
noisy_latents = noise_scheduler.add_noise(rest_latents, rest_noise, timesteps)
in_latents = torch.cat([first_latent, noisy_latents], dim=1) # torch.Size([1, 4, 16, 64, 64])
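# Shape note (following the shape comments above): latents and in_latents are laid out as
# (b, c, f, h, w), so the 0:1 / 1: slicing above indexes dim 1, and the concatenation along
# dim=1 restores the 4-channel latent that the ControlNet and UNet receive below.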
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["prompt_ids"])[0] # torch.Size([1, 77, 768])
# Get the target for loss depending on the prediction type
if noise_scheduler.prediction_type == "epsilon":
# target = torch.cat([first_target, rest_noise], dim=1)
target = rest_noise
elif noise_scheduler.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(rest_latents, rest_noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.prediction_type}")
### === Forward controlnet
down_block_res_samples, mid_block_res_sample = controlnet(
in_latents,
timesteps,
encoder_hidden_states=encoder_hidden_states,
controlnet_cond=control_maps,
return_dict=False,
)
down_block_res_samples = [
down_block_res_sample * config.controlnet.controlnet_conditioning_scale
for down_block_res_sample in down_block_res_samples
]
mid_block_res_sample *= config.controlnet.controlnet_conditioning_scale # torch.Size([1, 1280, 16, 8, 8])
### ====================================================================================================== ###
### === Predict the noise residual and compute loss
model_pred = unet(
in_latents,
timesteps,
encoder_hidden_states=encoder_hidden_states,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
).sample
# model_pred = unet(
# in_latents,
# timesteps,
# encoder_hidden_states=encoder_hidden_states,
# ).sample
### ====================================================================================================== ###
### === Calculate loss
loss = F.mse_loss(model_pred[:, 1:, :, :, :].float(), target.float(), reduction="mean")
# loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
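# Note: the UNet prediction has the same (b, c, f, h, w) layout as in_latents, so the
# [:, 1:, :, :, :] slice in the loss indexes dim 1, mirroring how rest_noise was built above.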
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(loss.repeat(config.train_batch_size)).mean()
train_loss += avg_loss.item() / config.optimizer.gradient_accumulation_steps
### ====================================================================================================== ###
### === Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(itertools.chain(unet.parameters(), controlnet.parameters()), config.optimizer.max_grad_norm)
# accelerator.clip_grad_norm_(unet.parameters(), config.optimizer.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
### ====================================================================================================== ###
### === Log
print(f"epoch: {epoch}, global_step: {global_step}, timestep: {timesteps}, train_loss : {train_loss}")
param_name_to_meanvalue = get_statedict_for_sanity_check(unet, weight_sanity_checklist)
for key, value in param_name_to_meanvalue.items():
print(f" [weight sanity check], key: {key}, value: {value} ")
# param_name_to_meanvalue = get_statedict_for_sanity_check(controlnet, weight_sanity_checklist)
# for key, value in param_name_to_meanvalue.items():
# print(f" [weight sanity check], key: {key}, value: {value} ")
### ====================================================================================================== ###
# print(f"iter time : {time.time() - start_time}") # 1 iter 에 7.3s 정도 걸리네
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if accelerator.is_main_process and (global_step % config.checkpointing_steps == 0):
dir_path = utils.mkdir_ifnotexist(os.path.join(config.output_dir, "checkpoints"))
save_path = os.path.join(dir_path, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if accelerator.is_main_process and (global_step % config.validation_steps == 0):
frames = (pixel_values + 1.0) / 2.0  # rescale from [-1, 1] to [0, 1]; note: not used below, v2v_input_frames is built from pixel_values
v2v_input_frames = torch.nn.functional.interpolate(
pixel_values,
size=(h, w),
mode="bicubic",
antialias=True,
)
v2v_input_frames = rearrange(v2v_input_frames, '(b f) c h w -> b c f h w ', f=config.train_data.n_sample_frames)
print(f"in validation logic, pixel_values : {pixel_values.size()}, dtype: {pixel_values.dtype}, device: {pixel_values.device}")
print(f"in validation logic, v2v_input_frames : {v2v_input_frames.size()}, dtype: {v2v_input_frames.dtype}, device: {v2v_input_frames.device}")
out = validation_pipeline(
controlnet_hint=control_maps,
# controlnet_hint=None,
images=v2v_input_frames,
first_frame_output=None,
prompt=train_dataset.prompt,
# num_inference_steps=num_inference_steps,
num_inference_steps=config.inference.inference_steps,
width=w,
height=h,
# guidance_scale=guidance_scale,
guidance_scale=config.inference.guidance_scale,
generator=[torch.Generator(device="cuda").manual_seed(config.seed)],
# video_scale = video_scale, # per-frame as negative (>= 1 or set 0)
video_scale = config.inference.video_scale, # per-frame as negative (>= 1 or set 0)
# init_noise_by_residual_thres = init_noise_thres, # residual-based init. larger thres ==> more smooth.
init_noise_by_residual_thres = config.inference.init_noise_thres, # residual-based init. larger thres ==> more smooth.
controlnet_conditioning_scale=1.0,
fix_first_frame=True,
in_domain=True, # whether to use the video model to generate the first frame.
)
dir_path = utils.mkdir_ifnotexist(os.path.join(config.output_dir, "samples"))
output_filename = f"inputs_{str(global_step).zfill(8)}.gif"
# print(f"out : type : {type(out)}") # out : type : <class 'diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput'>
out = out.images[0]
imageio.mimsave(os.path.join(dir_path, output_filename), out, fps=8)
### ====================================================================================================== ###
print(f"End of process..!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="./configs/depth.yaml")
parser.add_argument("--desc", type=str, default="_")
parser.add_argument("--output_dir", type=str, default="./outputs")
args = parser.parse_args()
config = OmegaConf.load(args.config)
### update config
config.output_dir = f"{args.output_dir}/{args.desc}"
### ===============================================================
main(config)
where the config file contains the following:
train_batch_size: 1
max_train_steps: 500
checkpointing_steps: 100
validation_steps: 3
trainable_modules:
- "_temporal"
seed: 33
### === config for accelerator ====== ###
mixed_precision: fp16
### ================================= ###
enable_xformers_memory_efficient_attention: True
gradient_checkpointing: True
# pretrained_model_path: "runwayml/stable-diffusion-v1-5"
pretrained:
  pretrained_model_path: "./pretrained_checkpoints/stable-diffusion-v1-5"
  control_mode: "depth"
  controlnet_model_path: "./pretrained_checkpoints/sd-controlnet-depth"
controlnet:
  controlnet_conditioning_scale: 1.0
train_data:
  video_path: "./videos/bear.mp4"
  prompt: "a bear is walking"
  n_sample_frames: 8
  width: 512
  height: 512
  sample_start_idx: 0
  sample_frame_rate: 2
### === config for optimizer ====== ###
optimizer:
  use_8bit_adam: False
  scale_lr: False
  learning_rate: 3e-5
  adam_beta1: 0.9
  adam_beta2: 0.999
  adam_weight_decay: 1e-2
  adam_epsilon: 1e-08
  max_grad_norm: 1.0
  lr_scheduler_type: "constant"
  lr_warmup_steps: 0
  gradient_accumulation_steps: 1
### ================================= ###
inference:
  inference_steps: 20
  guidance_scale: 7.5
  video_scale: 1.5
  init_noise_thres: 0.1
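For completeness, here is a small sketch (not part of train.py) of how I check that the nested keys in this YAML resolve the way the script accesses them; the config path is just the default from my argparse setup:
from omegaconf import OmegaConf
config = OmegaConf.load("./configs/depth.yaml")        # default --config path in train.py
print(config.train_batch_size)                          # 1
print(config.train_data.n_sample_frames)                # 8
print(config.optimizer.gradient_accumulation_steps)     # 1
print(OmegaConf.to_yaml(config.pretrained))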
With the current code, the model's validation outputs at step 3, step 6, and step 9 are shown in the attached samples.
I suspect that the loss computation or the validation pipeline may be wrong, but I can't come up with anything beyond what is shown above.
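To make the part I suspect easier to inspect in isolation, here is a minimal, self-contained sketch of just the noising / first-frame-conditioning / loss slicing from my training step, run on random tensors. The shapes follow the shape comments in train.py above, and the plain additive noise is only a stand-in for noise_scheduler.add_noise:
import torch
import torch.nn.functional as F
b, c, f, h, w = 1, 4, 8, 64, 64                  # latents layout after rearrange: "b c f h w"
latents = torch.randn(b, c, f, h, w)
first_latent = latents[:, 0:1, :, :, :]          # same slicing as in the training step above
rest_latents = latents[:, 1:, :, :, :]
rest_noise = torch.randn_like(rest_latents)
noisy_rest = rest_latents + rest_noise           # stand-in for noise_scheduler.add_noise(...)
in_latents = torch.cat([first_latent, noisy_rest], dim=1)
print("latents     ", tuple(latents.shape))      # (1, 4, 8, 64, 64)
print("first_latent", tuple(first_latent.shape)) # (1, 1, 8, 64, 64)
print("rest_latents", tuple(rest_latents.shape)) # (1, 3, 8, 64, 64)
print("in_latents   ", tuple(in_latents.shape))  # (1, 4, 8, 64, 64)
model_pred = torch.randn_like(in_latents)        # stand-in for the UNet/ControlNet prediction
loss = F.mse_loss(model_pred[:, 1:, :, :, :].float(), rest_noise.float(), reduction="mean")
print("loss:", loss.item())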