denoising-diffusion-pytorch
Script/Instructions for producing samples?
I couldn't find a script that produces samples like the ones shown in the README. Does it exist in the repo? If not, would you mind committing the evaluation/sampling script you used to generate the images in the README?
You can use code like this:
```python
import torch
from tqdm import tqdm_notebook
from torchvision.utils import make_grid
from PIL import Image

from model import UNet
from diffusion import GaussianDiffusion, make_beta_schedule

# Load a trained checkpoint and restore the EMA weights
ckpt = torch.load('checkpoint/diffusion_050000.pt')
model = UNet(3, 128, [1, 1, 2, 2, 4, 4], 2, [16], 0, 1)
model.load_state_dict(ckpt['ema'])
model = model.to('cuda')

# Linear beta schedule over 1000 diffusion steps
betas = make_beta_schedule('linear', 1e-4, 2e-2, 1000)
diffusion = GaussianDiffusion(betas).to('cuda')


@torch.no_grad()
def p_sample_loop(self, model, shape, device, noise_fn=torch.randn, capture_every=1000):
    # Start from pure noise and run the reverse process, keeping an
    # intermediate image every `capture_every` steps
    img = noise_fn(shape, device=device)
    imgs = []

    for i in tqdm_notebook(reversed(range(self.num_timesteps))):
        img = self.p_sample(
            model, img, torch.full((shape[0],), i, dtype=torch.int64).to(device), noise_fn=noise_fn
        )

        if i % capture_every == 0:
            imgs.append(img)

    # Append the fully denoised result as the last element
    imgs.append(img)

    return imgs


# Sample 16 images at 128x128, capturing every 10th step of the reverse process
imgs = p_sample_loop(diffusion, model, [16, 3, 128, 128], 'cuda', capture_every=10)
imgs = imgs[1:]

# Lay out the denoising progression of one sample (index `id`) as a 5-column grid
id = 0
grid = make_grid(torch.cat([i[id:id + 1] for i in imgs[:-1:4]], 0), nrow=5, normalize=True, range=(-1, 1))
# In a notebook, the PIL image returned by the last line is displayed inline
Image.fromarray(grid.detach().mul(255).cpu().type(torch.uint8).permute(1, 2, 0).numpy())
```
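If you want to write the results to disk rather than rely on the notebook displaying the last expression, you can convert the tensors the same way and call `save()` on the PIL images. Below is a minimal sketch reusing `grid` and `imgs` from the code above; the output file names are arbitrary placeholders:

```python
# Save the denoising-progression grid built above (file name is arbitrary)
Image.fromarray(
    grid.detach().mul(255).cpu().type(torch.uint8).permute(1, 2, 0).numpy()
).save('progression.png')

# imgs[-1] is the final, fully denoised batch; lay the 16 samples out as a 4x4 grid
final = make_grid(imgs[-1], nrow=4, normalize=True, range=(-1, 1))
Image.fromarray(
    final.detach().mul(255).cpu().type(torch.uint8).permute(1, 2, 0).numpy()
).save('samples.png')
```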
I will consider adding a generation script.