```
Traceback (most recent call last):
  File "wav2lip_train.py", line 372, in <module>
    train(device, model, train_data_loader, test_data_loader, optimizer,
  File "wav2lip_train.py", line 223, in train
    sync_loss = get_sync_loss(mel, g)
  File "wav2lip_train.py", line 196, in get_sync_loss
    a, v = syncnet(mel, g)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/root/wav2lip_288x288/models/syncnetv2.py", line 63, in forward
    face_embedding = self.face_encoder(face_sequences)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/container.py", line 141, in forward
    input = module(input)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/root/wav2lip_288x288/models/conv2.py", line 17, in forward
    out = self.conv_block(x)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/container.py", line 141, in forward
    input = module(input)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/batchnorm.py", line 168, in forward
    return F.batch_norm(
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/functional.py", line 2419, in batch_norm
    _verify_batch_size(input.size())
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/functional.py", line 2387, in _verify_batch_size
    raise ValueError("Expected more than 1 value per channel when training, got input size {}".format(size))
ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 512, 1, 1])
```
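This error means a `BatchNorm2d` layer in training mode received an input with only one value per channel: the face encoder output here is `(1, 512, 1, 1)`, i.e. a batch of size 1 reduced to a 1×1 feature map, so per-batch statistics cannot be computed. The usual trigger is the final `DataLoader` batch of an epoch having size 1. Below is a minimal, self-contained sketch of the failure and two common workarounds; the toy encoder only stands in for the real `syncnetv2` face encoder, and the shapes and batch sizes are illustrative assumptions, not the repo's actual values:

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-in for the syncnet face encoder (NOT the real syncnetv2 code):
# after global pooling the feature map is (N, 512, 1, 1), so BatchNorm2d
# in training mode needs batch size N > 1 to compute statistics.
encoder = nn.Sequential(
    nn.Conv2d(3, 512, kernel_size=3, padding=1),
    nn.AdaptiveAvgPool2d(1),
    nn.BatchNorm2d(512),
)

x = torch.randn(1, 3, 48, 48)  # batch of size 1, as in the traceback
# encoder(x)  # ValueError: Expected more than 1 value per channel ...

# Workaround 1: drop the last incomplete batch so a size-1 batch never
# reaches BatchNorm (33 samples with batch_size=16 would leave one over).
dataset = TensorDataset(torch.randn(33, 3, 48, 48))
loader = DataLoader(dataset, batch_size=16, shuffle=True, drop_last=True)
for (batch,) in loader:
    encoder(batch)  # every batch has 16 samples, no error

# Workaround 2: if the network is frozen, eval mode makes BatchNorm use
# its running statistics instead, so a batch of size 1 is fine.
encoder.eval()
encoder(x)  # no error in eval mode
```

Since the traceback shows `syncnet` used only inside `get_sync_loss` as a loss network, calling `syncnet.eval()` is likely safe, but that is an assumption about your training setup; the `drop_last=True` route is the more conservative fix because it keeps BatchNorm statistics meaningful during training.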
Hi @Ezrealz, did you solve it?