vid2avatar
vid2avatar copied to clipboard
Please check the test code
Hi again — while I was testing the results, I found something that looks wrong in the code:
https://github.com/MoyGcc/vid2avatar/blob/main/code/v2a_model.py#L32C1-L48
class V2AModel(pl.LightningModule):
    """Lightning module wrapping the V2A reconstruction model together with
    per-frame optimizable SMPL body parameters.

    The SMPL parameters (shape, global orientation, body pose, translation)
    are stored in ``BodyModelParams`` and initialized from preprocessed
    ``.npy`` files covering frames ``[start_frame, end_frame)``.
    """

    def __init__(self, opt) -> None:
        """Build the V2A model and the optimizable body-model parameters.

        Args:
            opt: Hydra/OmegaConf config with ``opt.model`` and
                ``opt.dataset.metainfo`` (``start_frame``, ``end_frame``,
                ``data_dir``, ``gender``).
        """
        super().__init__()

        self.opt = opt
        num_training_frames = opt.dataset.metainfo.end_frame - opt.dataset.metainfo.start_frame
        self.betas_path = os.path.join(hydra.utils.to_absolute_path('..'), 'data', opt.dataset.metainfo.data_dir, 'mean_shape.npy')
        self.gender = opt.dataset.metainfo.gender
        self.model = V2A(opt.model, self.betas_path, self.gender, num_training_frames)
        self.start_frame = opt.dataset.metainfo.start_frame
        self.end_frame = opt.dataset.metainfo.end_frame
        self.training_modules = ["model"]
        # Absolute frame indices used to slice the preprocessed pose arrays.
        self.training_indices = list(range(self.start_frame, self.end_frame))
        self.body_model_params = BodyModelParams(num_training_frames, model_type='smpl')
        # NOTE(review): this re-initializes the body-model parameters from the
        # .npy files on every construction — including when the module is
        # restored from a checkpoint at test time. Confirm this does not
        # clobber the optimized parameters loaded from the checkpoint.
        self.load_body_model_params()
        optim_params = self.body_model_params.param_names
        for param_name in optim_params:
            self.body_model_params.set_requires_grad(param_name, requires_grad=True)
        self.training_modules += ['body_model_params']

        self.loss = Loss(opt.model.loss)

    def load_body_model_params(self):
        """Initialize SMPL parameters for the training frame range.

        Reads ``mean_shape.npy`` (betas), ``poses.npy`` (axis-angle pose:
        first 3 values = global orientation, rest = body pose) and
        ``normalize_trans.npy`` (translation), slices them to
        ``self.training_indices``, and registers them on
        ``self.body_model_params`` with ``requires_grad=False``.
        """
        body_model_params = {param_name: [] for param_name in self.body_model_params.param_names}
        data_root = os.path.join('../data', self.opt.dataset.metainfo.data_dir)
        data_root = hydra.utils.to_absolute_path(data_root)

        # Load poses.npy once instead of twice (the original re-read the same
        # file for global_orient and body_pose).
        poses = np.load(os.path.join(data_root, 'poses.npy'))[self.training_indices]
        body_model_params['betas'] = torch.tensor(np.load(os.path.join(data_root, 'mean_shape.npy'))[None], dtype=torch.float32)
        body_model_params['global_orient'] = torch.tensor(poses[:, :3], dtype=torch.float32)
        body_model_params['body_pose'] = torch.tensor(poses[:, 3:], dtype=torch.float32)
        body_model_params['transl'] = torch.tensor(np.load(os.path.join(data_root, 'normalize_trans.npy'))[self.training_indices], dtype=torch.float32)

        for param_name in body_model_params.keys():
            self.body_model_params.init_parameters(param_name, body_model_params[param_name], requires_grad=False)
I think you should not run self.load_body_model_params() during the test phase. At test time, the body model parameters have already been initialized from the checkpoint, and running this function overwrites them with the initial values stored on disk — so the trained pose parameters would not actually be used at test time.
Is there anything that I'm missing? Please advise.
Thank you!