
Testing

CrMessiSuriJr opened this issue on Jun 12, 2024 · 0 comments

When I run the following test-generation script, I get the error shown below.

```python
import os

import torch
import torchvision.transforms as transforms
from PIL import Image

from synthesis_network import Generator  # Import the Generator model from synthesis_network.py

# Define paths
input_image_path = '/workspace/saransh/tinyface/Testing_Set/Gallery_Match/1_64.jpg'
model_path = 'model_4.pt'
output_dir = 'output'

# Ensure the output directory exists
os.makedirs(output_dir, exist_ok=True)

# Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the model
model = Generator()
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
model.eval()

# Define image transformation
transform = transforms.Compose([
    transforms.Resize((256, 256)),  # Resize image to model input size
    transforms.ToTensor(),
])

# Load and preprocess input image
input_image = Image.open(input_image_path).convert('RGB')
input_tensor = transform(input_image).unsqueeze(0).to(device)

# Generate output
with torch.no_grad():
    output_tuple = model(input_tensor)
    output_image = output_tuple[1].squeeze(0).cpu()  # Access the synthesized image (out)

# Save output image
output_image_path = os.path.join(output_dir, 'synthesized_image.jpg')
transforms.ToPILImage()(output_image).save(output_image_path)

print(f"Synthesized image saved at: {output_image_path}")
```


This is the error raised by `model.load_state_dict`:

```
Missing key(s) in state_dict: "encoder.model.1.weight", "encoder.model.1.bias", "encoder.model.4.weight", "encoder.model.4.bias", "encoder.model.7.weight", "encoder.model.7.bias", "encoder.model.10.block.1.weight", "encoder.model.10.block.1.bias", "encoder.model.10.block.5.weight", "encoder.model.10.block.5.bias", "encoder.model.11.block.1.weight", "encoder.model.11.block.1.bias", "encoder.model.11.block.5.weight", "encoder.model.11.block.5.bias", "encoder.model.12.block.1.weight", "encoder.model.12.block.1.bias", "encoder.model.12.block.5.weight", "encoder.model.12.block.5.bias", "decoder.model.0.block.1.weight", "decoder.model.0.block.1.bias", "decoder.model.0.block.2.running_mean", "decoder.model.0.block.2.running_var", "decoder.model.0.block.5.weight", "decoder.model.0.block.5.bias", "decoder.model.0.block.6.running_mean", "decoder.model.0.block.6.running_var", "decoder.model.1.block.1.weight", "decoder.model.1.block.1.bias", "decoder.model.1.block.2.running_mean", "decoder.model.1.block.2.running_var", "decoder.model.1.block.5.weight", "decoder.model.1.block.5.bias", "decoder.model.1.block.6.running_mean", "decoder.model.1.block.6.running_var", "decoder.model.2.block.1.weight", "decoder.model.2.block.1.bias", "decoder.model.2.block.2.running_mean", "decoder.model.2.block.2.running_var", "decoder.model.2.block.5.weight", "decoder.model.2.block.5.bias", "decoder.model.2.block.6.running_mean", "decoder.model.2.block.6.running_var", "decoder.model.4.weight", "decoder.model.4.bias", "decoder.model.5.gamma", "decoder.model.5.beta", "decoder.model.8.weight", "decoder.model.8.bias", "decoder.model.9.gamma", "decoder.model.9.beta", "decoder.model.12.weight", "decoder.model.12.bias", "decoder.mlp.model.0.weight", "decoder.mlp.model.0.bias", "decoder.mlp.model.2.weight", "decoder.mlp.model.2.bias", "decoder.mlp.model.4.weight", "decoder.mlp.model.4.bias", "pca.U", "pca.mu".
Unexpected key(s) in state_dict: "conv1.weight", "bn1.weight", "bn1.bias", "bn1.running_mean", "bn1.running_var", "bn1.num_batches_tracked", "prelu.weight", "layer1.0.bn1.weight", "layer1.0.bn1.bias", "layer1.0.bn1.running_mean", "layer1.0.bn1.running_var", "layer1.0.bn1.num_batches_tracked", "layer1.0.conv1.weight", "layer1.0.bn2.weight", "layer1.0.bn2.bias", "layer1.0.bn2.running_mean", "layer1.0.bn2.running_var", "layer1.0.bn2.num_batches_tracked", "layer1.0.prel
```
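The unexpected keys (`conv1`, `bn1`, `prelu`, `layer1.*`) look like a face-recognition backbone rather than the synthesis `Generator`, so the checkpoint may not hold the Generator weights directly, or may store them under a sub-key. A minimal sketch for inspecting what `model_4.pt` actually contains before calling `load_state_dict` (this assumes the file loads as either a raw state_dict or a wrapper dict; any sub-key names it reveals are checkpoint-specific):

```python
import torch

# Load the checkpoint on CPU and look at its top-level structure.
ckpt = torch.load('model_4.pt', map_location='cpu')

if isinstance(ckpt, dict):
    for key, value in list(ckpt.items())[:20]:
        # If the keys are tensor names like 'conv1.weight', the file is a raw
        # state_dict for some network; if they are entries like a nested dict,
        # the Generator weights may live under one of those sub-keys.
        desc = tuple(value.shape) if torch.is_tensor(value) else type(value).__name__
        print(key, desc)
else:
    print(type(ckpt))
```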
