I copied all the files from RIFE 4.18 and also changed the paths in the .py files,
but I am still getting this error:
Traceback (most recent call last):
File "/AFI-ForwardDeduplicate/interpolate_video_forward.py", line 93, in
model.load_state_dict(convert(torch.load('weights/flownet.pkl')))
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 2189, in load_state_dict
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
RuntimeError: Error(s) in loading state_dict for IFNet:
Unexpected key(s) in state_dict: "teacher.conv0.0.0.weight", "teacher.conv0.0.0.bias", "teacher.conv0.1.0.weight", "teacher.conv0.1.0.bias", "teacher.convblock.0.beta", "teacher.convblock.0.conv.weight", "teacher.convblock.0.conv.bias", "teacher.convblock.1.beta", "teacher.convblock.1.conv.weight", "teacher.convblock.1.conv.bias", "teacher.convblock.2.beta", "teacher.convblock.2.conv.weight", "teacher.convblock.2.conv.bias", "teacher.convblock.3.beta", "teacher.convblock.3.conv.weight", "teacher.convblock.3.conv.bias", "teacher.convblock.4.beta", "teacher.convblock.4.conv.weight", "teacher.convblock.4.conv.bias", "teacher.convblock.5.beta", "teacher.convblock.5.conv.weight", "teacher.convblock.5.conv.bias", "teacher.convblock.6.beta", "teacher.convblock.6.conv.weight", "teacher.convblock.6.conv.bias", "teacher.convblock.7.beta", "teacher.convblock.7.conv.weight", "teacher.convblock.7.conv.bias", "teacher.lastconv.0.weight", "teacher.lastconv.0.bias".
I renamed rife48.pkl to flownet.pkl.
Please tell me how to fix this.
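All the unexpected keys start with "teacher.", so my first thought was to just drop that branch from the checkpoint before calling load_state_dict, using the same model object that interpolate_video_forward.py builds. This is only a rough sketch of what I mean, I am not sure it is the intended fix:

import torch

sd = torch.load('weights/flownet.pkl', map_location='cpu')
# keep everything except the distillation "teacher" branch,
# which the IFNet in this repo apparently does not define
sd = {k: v for k, v in sd.items() if 'teacher' not in k}
model.load_state_dict(sd)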
I also tried loading the checkpoint directly, without convert(), and now I am getting this:
File "/AFI-ForwardDeduplicate/interpolate_video_forward.py", line 94, in <module>
model.load_state_dict(torch.load('weights/flownet.pkl'))
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 2189, in load_state_dict
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
RuntimeError: Error(s) in loading state_dict for IFNet:
Missing key(s) in state_dict: "block0.conv0.0.0.weight", "block0.conv0.0.0.bias", "block0.conv0.1.0.weight", "block0.conv0.1.0.bias", "block0.convblock.0.beta", "block0.convblock.0.conv.weight", "block0.convblock.0.conv.bias", "block0.convblock.1.beta", "block0.convblock.1.conv.weight", "block0.convblock.1.conv.bias", "block0.convblock.2.beta", "block0.convblock.2.conv.weight", "block0.convblock.2.conv.bias", "block0.convblock.3.beta", "block0.convblock.3.conv.weight", "block0.convblock.3.conv.bias", "block0.convblock.4.beta", "block0.convblock.4.conv.weight", "block0.convblock.4.conv.bias", "block0.convblock.5.beta", "block0.convblock.5.conv.weight", "block0.convblock.5.conv.bias", "block0.convblock.6.beta", "block0.convblock.6.conv.weight", "block0.convblock.6.conv.bias", "block0.convblock.7.beta", "block0.convblock.7.conv.weight", "block0.convblock.7.conv.bias", "block0.lastconv.0.weight", "block0.lastconv.0.bias", "block1.conv0.0.0.weight", "block1.conv0.0.0.bias", "block1.conv0.1.0.weight", "block1.conv0.1.0.bias", "block1.convblock.0.beta", "block1.convblock.0.conv.weight", "block1.convblock.0.conv.bias", "block1.convblock.1.beta", "block1.convblock.1.conv.weight", "block1.convblock.1.conv.bias", "block1.convblock.2.beta", "block1.convblock.2.conv.weight", "block1.convblock.2.conv.bias", "block1.convblock.3.beta", "block1.convblock.3.conv.weight", "block1.convblock.3.conv.bias", "block1.convblock.4.beta", "block1.convblock.4.conv.weight", "block1.convblock.4.conv.bias", "block1.convblock.5.beta", "block1.convblock.5.conv.weight", "block1.convblock.5.conv.bias", "block1.convblock.6.beta", "block1.convblock.6.conv.weight", "block1.convblock.6.conv.bias", "block1.convblock.7.beta", "block1.convblock.7.conv.weight", "block1.convblock.7.conv.bias", "block1.lastconv.0.weight", "block1.lastconv.0.bias", "block2.conv0.0.0.weight", "block2.conv0.0.0.bias", "block2.conv0.1.0.weight", "block2.conv0.1.0.bias", "block2.convblock.0.beta", "block2.convblock.0.conv.weight", "block2.convblock.0.conv.bias", "block2.convblock.1.beta", "block2.convblock.1.conv.weight", "block2.convblock.1.conv.bias", "block2.convblock.2.beta", "block2.convblock.2.conv.weight", "block2.convblock.2.conv.bias", "block2.convblock.3.beta", "block2.convblock.3.conv.weight", "block2.convblock.3.conv.bias", "block2.convblock.4.beta", "block2.convblock.4.conv.weight", "block2.convblock.4.conv.bias", "block2.convblock.5.beta", "block2.convblock.5.conv.weight", "block2.convblock.5.conv.bias", "block2.convblock.6.beta", "block2.convblock.6.conv.weight", "block2.convblock.6.conv.bias", "block2.convblock.7.beta", "block2.convblock.7.conv.weight", "block2.convblock.7.conv.bias", "block2.lastconv.0.weight", "block2.lastconv.0.bias", "block3.conv0.0.0.weight", "block3.conv0.0.0.bias", "block3.conv0.1.0.weight", "block3.conv0.1.0.bias", "block3.convblock.0.beta", "block3.convblock.0.conv.weight", "block3.convblock.0.conv.bias", "block3.convblock.1.beta", "block3.convblock.1.conv.weight", "block3.convblock.1.conv.bias", "block3.convblock.2.beta", "block3.convblock.2.conv.weight", "block3.convblock.2.conv.bias", "block3.convblock.3.beta", "block3.convblock.3.conv.weight", "block3.convblock.3.conv.bias", "block3.convblock.4.beta", "block3.convblock.4.conv.weight", "block3.convblock.4.conv.bias", "block3.convblock.5.beta", "block3.convblock.5.conv.weight", "block3.convblock.5.conv.bias", "block3.convblock.6.beta", "block3.convblock.6.conv.weight", "block3.convblock.6.conv.bias", "block3.convblock.7.beta", "block3.convblock.7.conv.weight", 
"block3.convblock.7.conv.bias", "block3.lastconv.0.weight", "block3.lastconv.0.bias", "encode.cnn0.weight", "encode.cnn0.bias", "encode.cnn1.weight", "encode.cnn1.bias", "encode.cnn2.weight", "encode.cnn2.bias", "encode.cnn3.weight", "encode.cnn3.bias".
Unexpected key(s) in state_dict: "module.block0.conv0.0.0.weight", "module.block0.conv0.0.0.bias", "module.block0.conv0.1.0.weight", "module.block0.conv0.1.0.bias", "module.block0.convblock.0.beta", "module.block0.convblock.0.conv.weight", "module.block0.convblock.0.conv.bias", "module.block0.convblock.1.beta", "module.block0.convblock.1.conv.weight", "module.block0.convblock.1.conv.bias", "module.block0.convblock.2.beta", "module.block0.convblock.2.conv.weight", "module.block0.convblock.2.conv.bias", "module.block0.convblock.3.beta", "module.block0.convblock.3.conv.weight", "module.block0.convblock.3.conv.bias", "module.block0.convblock.4.beta", "module.block0.convblock.4.conv.weight", "module.block0.convblock.4.conv.bias", "module.block0.convblock.5.beta", "module.block0.convblock.5.conv.weight", "module.block0.convblock.5.conv.bias", "module.block0.convblock.6.beta", "module.block0.convblock.6.conv.weight", "module.block0.convblock.6.conv.bias", "module.block0.convblock.7.beta", "module.block0.convblock.7.conv.weight", "module.block0.convblock.7.conv.bias", "module.block0.lastconv.0.weight", "module.block0.lastconv.0.bias", "module.block1.conv0.0.0.weight", "module.block1.conv0.0.0.bias", "module.block1.conv0.1.0.weight", "module.block1.conv0.1.0.bias", "module.block1.convblock.0.beta", "module.block1.convblock.0.conv.weight", "module.block1.convblock.0.conv.bias", "module.block1.convblock.1.beta", "module.block1.convblock.1.conv.weight", "module.block1.convblock.1.conv.bias", "module.block1.convblock.2.beta", "module.block1.convblock.2.conv.weight", "module.block1.convblock.2.conv.bias", "module.block1.convblock.3.beta", "module.block1.convblock.3.conv.weight", "module.block1.convblock.3.conv.bias", "module.block1.convblock.4.beta", "module.block1.convblock.4.conv.weight", "module.block1.convblock.4.conv.bias", "module.block1.convblock.5.beta", "module.block1.convblock.5.conv.weight", "module.block1.convblock.5.conv.bias", "module.block1.convblock.6.beta", "module.block1.convblock.6.conv.weight", "module.block1.convblock.6.conv.bias", "module.block1.convblock.7.beta", "module.block1.convblock.7.conv.weight", "module.block1.convblock.7.conv.bias", "module.block1.lastconv.0.weight", "module.block1.lastconv.0.bias", "module.block2.conv0.0.0.weight", "module.block2.conv0.0.0.bias", "module.block2.conv0.1.0.weight", "module.block2.conv0.1.0.bias", "module.block2.convblock.0.beta", "module.block2.convblock.0.conv.weight", "module.block2.convblock.0.conv.bias", "module.block2.convblock.1.beta", "module.block2.convblock.1.conv.weight", "module.block2.convblock.1.conv.bias", "module.block2.convblock.2.beta", "module.block2.convblock.2.conv.weight", "module.block2.convblock.2.conv.bias", "module.block2.convblock.3.beta", "module.block2.convblock.3.conv.weight", "module.block2.convblock.3.conv.bias", "module.block2.convblock.4.beta", "module.block2.convblock.4.conv.weight", "module.block2.convblock.4.conv.bias", "module.block2.convblock.5.beta", "module.block2.convblock.5.conv.weight", "module.block2.convblock.5.conv.bias", "module.block2.convblock.6.beta", "module.block2.convblock.6.conv.weight", "module.block2.convblock.6.conv.bias", "module.block2.convblock.7.beta", "module.block2.convblock.7.conv.weight", "module.block2.convblock.7.conv.bias", "module.block2.lastconv.0.weight", "module.block2.lastconv.0.bias", "module.block3.conv0.0.0.weight", "module.block3.conv0.0.0.bias", "module.block3.conv0.1.0.weight", "module.block3.conv0.1.0.bias", "module.block3.convblock.0.beta", 
"module.block3.convblock.0.conv.weight", "module.block3.convblock.0.conv.bias", "module.block3.convblock.1.beta", "module.block3.convblock.1.conv.weight", "module.block3.convblock.1.conv.bias", "module.block3.convblock.2.beta", "module.block3.convblock.2.conv.weight", "module.block3.convblock.2.conv.bias", "module.block3.convblock.3.beta", "module.block3.convblock.3.conv.weight", "module.block3.convblock.3.conv.bias", "module.block3.convblock.4.beta", "module.block3.convblock.4.conv.weight", "module.block3.convblock.4.conv.bias", "module.block3.convblock.5.beta", "module.block3.convblock.5.conv.weight", "module.block3.convblock.5.conv.bias", "module.block3.convblock.6.beta", "module.block3.convblock.6.conv.weight", "module.block3.convblock.6.conv.bias", "module.block3.convblock.7.beta", "module.block3.convblock.7.conv.weight", "module.block3.convblock.7.conv.bias", "module.block3.lastconv.0.weight", "module.block3.lastconv.0.bias", "module.encode.cnn0.weight", "module.encode.cnn0.bias", "module.encode.cnn1.weight", "module.encode.cnn1.bias", "module.encode.cnn2.weight", "module.encode.cnn2.bias", "module.encode.cnn3.weight", "module.encode.cnn3.bias", "module.teacher.conv0.0.0.weight", "module.teacher.conv0.0.0.bias", "module.teacher.conv0.1.0.weight", "module.teacher.conv0.1.0.bias", "module.teacher.convblock.0.beta", "module.teacher.convblock.0.conv.weight", "module.teacher.convblock.0.conv.bias", "module.teacher.convblock.1.beta", "module.teacher.convblock.1.conv.weight", "module.teacher.convblock.1.conv.bias", "module.teacher.convblock.2.beta", "module.teacher.convblock.2.conv.weight", "module.teacher.convblock.2.conv.bias", "module.teacher.convblock.3.beta", "module.teacher.convblock.3.conv.weight", "module.teacher.convblock.3.conv.bias", "module.teacher.convblock.4.beta", "module.teacher.convblock.4.conv.weight", "module.teacher.convblock.4.conv.bias", "module.teacher.convblock.5.beta", "module.teacher.convblock.5.conv.weight", "module.teacher.convblock.5.conv.bias", "module.teacher.convblock.6.beta", "module.teacher.convblock.6.conv.weight", "module.teacher.convblock.6.conv.bias", "module.teacher.convblock.7.beta", "module.teacher.convblock.7.conv.weight", "module.teacher.convblock.7.conv.bias", "module.teacher.lastconv.0.weight", "module.teacher.lastconv.0.bias".
For reference, here is the forward() from my IFNet_HDv3.py:
def forward(self, x, timestep=0.5, scale_list=[8, 4, 2, 1], training=False, fastmode=True, ensemble=False):
    if training == False:
        channel = x.shape[1] // 2
        img0 = x[:, :channel]
        img1 = x[:, channel:]
    if not torch.is_tensor(timestep):
        timestep = (x[:, :1].clone() * 0 + 1) * timestep
    else:
        timestep = timestep.repeat(1, 1, img0.shape[2], img0.shape[3])
    f0 = self.encode(img0[:, :3])
    f1 = self.encode(img1[:, :3])
    flow_list = []
    merged = []
    mask_list = []
    warped_img0 = img0
    warped_img1 = img1
    flow = None
    mask = None
    loss_cons = 0
    block = [self.block0, self.block1, self.block2, self.block3]
    for i in range(4):
        if flow is None:
            flow, mask, feat = block[i](torch.cat((img0[:, :3], img1[:, :3], f0, f1, timestep), 1), None,
                                        scale=scale_list[i])
            if ensemble:
                print("warning: ensemble is not supported since RIFEv4.21")
        else:
            wf0 = warp(f0, flow[:, :2])
            wf1 = warp(f1, flow[:, 2:4])
            fd, m0, feat = block[i](
                torch.cat((warped_img0[:, :3], warped_img1[:, :3], wf0, wf1, timestep, mask, feat), 1), flow,
                scale=scale_list[i])
            if ensemble:
                print("warning: ensemble is not supported since RIFEv4.21")
            else:
                mask = m0
            flow = flow + fd
        mask_list.append(mask)
        flow_list.append(flow)
        warped_img0 = warp(img0, flow[:, :2])
        warped_img1 = warp(img1, flow[:, 2:4])
        merged.append((warped_img0, warped_img1))
    mask = torch.sigmoid(mask)
    merged[3] = (warped_img0 * mask + warped_img1 * (1 - mask))
    return merged[3]